0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012 #define pr_fmt(fmt) "ACPI: OSL: " fmt
0013
0014 #include <linux/module.h>
0015 #include <linux/kernel.h>
0016 #include <linux/slab.h>
0017 #include <linux/mm.h>
0018 #include <linux/highmem.h>
0019 #include <linux/lockdep.h>
0020 #include <linux/pci.h>
0021 #include <linux/interrupt.h>
0022 #include <linux/kmod.h>
0023 #include <linux/delay.h>
0024 #include <linux/workqueue.h>
0025 #include <linux/nmi.h>
0026 #include <linux/acpi.h>
0027 #include <linux/efi.h>
0028 #include <linux/ioport.h>
0029 #include <linux/list.h>
0030 #include <linux/jiffies.h>
0031 #include <linux/semaphore.h>
0032 #include <linux/security.h>
0033
0034 #include <asm/io.h>
0035 #include <linux/uaccess.h>
0036 #include <linux/io-64-nonatomic-lo-hi.h>
0037
0038 #include "acpica/accommon.h"
0039 #include "internal.h"
0040
0041
0042 #define _COMPONENT ACPI_OS_SERVICES
0043 ACPI_MODULE_NAME("osl");
0044
/* Deferred procedure call: one ACPICA callback queued to a workqueue. */
struct acpi_os_dpc {
	acpi_osd_exec_callback function;	/* callback to run */
	void *context;				/* opaque argument for ->function */
	struct work_struct work;		/* workqueue item that runs it */
};
0050
#ifdef ENABLE_DEBUGGER
#include <linux/kdb.h>

/*
 * Nonzero while the kernel debugger owns the console; makes
 * acpi_os_vprintf() route output through kdb_printf().
 */
int acpi_in_debugger;
EXPORT_SYMBOL(acpi_in_debugger);
#endif
0058
/* Optional platform hooks invoked just before entering a sleep state. */
static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl,
				      u32 pm1b_ctrl);
static int (*__acpi_os_prepare_extended_sleep)(u8 sleep_state, u32 val_a,
					       u32 val_b);

/* SCI handler installed by ACPICA and its opaque context. */
static acpi_osd_handler acpi_irq_handler;
static void *acpi_irq_context;
/* Workqueues for deferred GPE, Notify and hotplug processing. */
static struct workqueue_struct *kacpid_wq;
static struct workqueue_struct *kacpi_notify_wq;
static struct workqueue_struct *kacpi_hotplug_wq;
static bool acpi_os_initialized;	/* set once the OS layer is usable */
unsigned int acpi_sci_irq = INVALID_ACPI_IRQ;	/* Linux IRQ number of the SCI */
bool acpi_permanent_mmap = false;	/* true once permanent mappings are allowed */
0072
0073
0074
0075
0076
/*
 * One cached ACPI memory mapping, rounded out to whole pages.  While the
 * entry is on the list it is reference counted; once the count drops to
 * zero it is unlinked and torn down from an RCU work item, so the
 * refcount and the rcu_work can share storage.
 */
struct acpi_ioremap {
	struct list_head list;
	void __iomem *virt;
	acpi_physical_address phys;
	acpi_size size;
	union {
		unsigned long refcount;	/* users while the entry is live */
		struct rcu_work rwork;	/* deferred unmap after unlink */
	} track;
};

/* Live mappings: walked under RCU, modified under acpi_ioremap_lock. */
static LIST_HEAD(acpi_ioremaps);
static DEFINE_MUTEX(acpi_ioremap_lock);
#define acpi_ioremap_lock_held() lock_is_held(&acpi_ioremap_lock.dep_map)
0091
/*
 * Reserve the I/O-port or memory range described by a FADT generic
 * address structure so it appears in /proc/ioports or /proc/iomem.
 */
static void __init acpi_request_region(struct acpi_generic_address *gas,
				       unsigned int length, char *desc)
{
	u64 addr;

	/* Copy via memcpy: gas->address may be unaligned. */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !length)
		return;

	/* Requests are best-effort and never released. */
	if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
		request_region(addr, length, desc);
	else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		request_mem_region(addr, length, desc);
}
0108
/*
 * Claim the fixed-hardware register blocks described by the FADT
 * (PM1a/b event and control, PM timer, PM2, GPE0/GPE1) so drivers
 * cannot accidentally request the same ranges.
 */
static int __init acpi_reserve_resources(void)
{
	acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1a_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1b_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1a_CNT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1b_CNT_BLK");

	/* Only reserve the PM timer block when its declared length is 4. */
	if (acpi_gbl_FADT.pm_timer_length == 4)
		acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");

	acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
		"ACPI PM2_CNT_BLK");

	/* GPE block lengths must be even; skip odd (malformed) ones. */
	if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
			       acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");

	if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
			       acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");

	return 0;
}
fs_initcall_sync(acpi_reserve_resources);
0142
/* printf()-style front end for ACPICA output; forwards to acpi_os_vprintf(). */
void acpi_os_printf(const char *fmt, ...)
{
	va_list args;
	va_start(args, fmt);
	acpi_os_vprintf(fmt, args);
	va_end(args);
}
EXPORT_SYMBOL(acpi_os_printf);
0151
0152 void acpi_os_vprintf(const char *fmt, va_list args)
0153 {
0154 static char buffer[512];
0155
0156 vsprintf(buffer, fmt, args);
0157
0158 #ifdef ENABLE_DEBUGGER
0159 if (acpi_in_debugger) {
0160 kdb_printf("%s", buffer);
0161 } else {
0162 if (printk_get_level(buffer))
0163 printk("%s", buffer);
0164 else
0165 printk(KERN_CONT "%s", buffer);
0166 }
0167 #else
0168 if (acpi_debugger_write_log(buffer) < 0) {
0169 if (printk_get_level(buffer))
0170 printk("%s", buffer);
0171 else
0172 printk(KERN_CONT "%s", buffer);
0173 }
0174 #endif
0175 }
0176
#ifdef CONFIG_KEXEC
/*
 * RSDP physical address handed over from a previous kernel via the
 * "acpi_rsdp=" parameter (parsed as hexadecimal).
 */
static unsigned long acpi_rsdp;
static int __init setup_acpi_rsdp(char *arg)
{
	return kstrtoul(arg, 16, &acpi_rsdp);
}
early_param("acpi_rsdp", setup_acpi_rsdp);
#endif
0185
/*
 * Locate the ACPI Root System Description Pointer.
 *
 * Lookup order: the "acpi_rsdp=" override (kexec, allowed only when not
 * locked down), an architecture-specific location, the EFI configuration
 * tables (preferring ACPI 2.0 over 1.0), and finally a legacy memory
 * scan.  Returns 0 when no RSDP is found.
 */
acpi_physical_address __init acpi_os_get_root_pointer(void)
{
	acpi_physical_address pa;

#ifdef CONFIG_KEXEC
	/*
	 * An RSDP supplied on the command line could point at modified
	 * ACPI tables, so honor it only when lockdown does not forbid
	 * ACPI table overrides.  When trusted, also stash it in the
	 * architecture-specific location so it survives further kexec()s.
	 */
	if (acpi_rsdp && !security_locked_down(LOCKDOWN_ACPI_TABLES)) {
		acpi_arch_set_root_pointer(acpi_rsdp);
		return acpi_rsdp;
	}
#endif
	pa = acpi_arch_get_root_pointer();
	if (pa)
		return pa;

	if (efi_enabled(EFI_CONFIG_TABLES)) {
		/* Prefer the ACPI 2.0+ table over the ACPI 1.0 one. */
		if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
			return efi.acpi20;
		if (efi.acpi != EFI_INVALID_TABLE_ADDR)
			return efi.acpi;
		pr_err("System description tables not found\n");
	} else if (IS_ENABLED(CONFIG_ACPI_LEGACY_TABLES_LOOKUP)) {
		/* Legacy BIOS: scan low memory for the RSDP signature. */
		acpi_find_root_pointer(&pa);
	}

	return pa;
}
0221
0222
/*
 * Find the cached mapping that fully covers [phys, phys + size).
 * Caller must hold acpi_ioremap_lock or be in an RCU read-side critical
 * section (checked through acpi_ioremap_lock_held()).
 */
static struct acpi_ioremap *
acpi_map_lookup(acpi_physical_address phys, acpi_size size)
{
	struct acpi_ioremap *map;

	list_for_each_entry_rcu(map, &acpi_ioremaps, list, acpi_ioremap_lock_held())
		if (map->phys <= phys &&
		    phys + size <= map->phys + map->size)
			return map;

	return NULL;
}
0235
0236
0237 static void __iomem *
0238 acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size)
0239 {
0240 struct acpi_ioremap *map;
0241
0242 map = acpi_map_lookup(phys, size);
0243 if (map)
0244 return map->virt + (phys - map->phys);
0245
0246 return NULL;
0247 }
0248
/*
 * Return a virtual address for [phys, phys + size) from the cached
 * mappings, taking an extra reference on the containing mapping.
 * Returns NULL (and takes no reference) when the range is not mapped.
 * The reference is dropped with acpi_os_unmap_iomem().
 */
void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size)
{
	struct acpi_ioremap *map;
	void __iomem *virt = NULL;

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup(phys, size);
	if (map) {
		virt = map->virt + (phys - map->phys);
		map->track.refcount++;
	}
	mutex_unlock(&acpi_ioremap_lock);
	return virt;
}
EXPORT_SYMBOL_GPL(acpi_os_get_iomem);
0264
0265
/*
 * Find the cached mapping that fully covers the virtual range
 * [virt, virt + size).  Same locking rules as acpi_map_lookup().
 */
static struct acpi_ioremap *
acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
{
	struct acpi_ioremap *map;

	list_for_each_entry_rcu(map, &acpi_ioremaps, list, acpi_ioremap_lock_held())
		if (map->virt <= virt &&
		    virt + size <= map->virt + map->size)
			return map;

	return NULL;
}
0278
#if defined(CONFIG_IA64) || defined(CONFIG_ARM64)
/* These architectures always go through ioremap for ACPI mappings. */
#define should_use_kmap(pfn)   0
#else
/* Elsewhere, RAM pages are mapped with kmap() instead of ioremap(). */
#define should_use_kmap(pfn)   page_is_ram(pfn)
#endif
0285
/*
 * Map a physical range: RAM pages go through kmap() (limited to a
 * single page), everything else through acpi_os_ioremap().
 * Returns NULL on failure.
 */
static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
{
	unsigned long pfn;

	pfn = pg_off >> PAGE_SHIFT;
	if (should_use_kmap(pfn)) {
		/* kmap() can only map one page at a time. */
		if (pg_sz > PAGE_SIZE)
			return NULL;
		return (void __iomem __force *)kmap(pfn_to_page(pfn));
	} else
		return acpi_os_ioremap(pg_off, pg_sz);
}
0298
0299 static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
0300 {
0301 unsigned long pfn;
0302
0303 pfn = pg_off >> PAGE_SHIFT;
0304 if (should_use_kmap(pfn))
0305 kunmap(pfn_to_page(pfn));
0306 else
0307 iounmap(vaddr);
0308 }
0309
0310
0311
0312
0313
0314
0315
0316
0317
0318
0319
0320
0321
0322
/**
 * acpi_os_map_iomem - Get a virtual address for a given physical range.
 * @phys: Start of the physical address range to map.
 * @size: Size of the physical address range to map.
 *
 * Look the range up in the list of existing ACPI memory mappings.  If
 * found, take a reference and return its virtual address.  If not, map
 * it (rounded out to whole pages), add it to the list and return the
 * virtual address corresponding to @phys.
 *
 * During early init (before acpi_permanent_mmap is set) this simply
 * defers to __acpi_map_table().  Returns NULL on failure.
 */
void __iomem __ref
*acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
{
	struct acpi_ioremap *map;
	void __iomem *virt;
	acpi_physical_address pg_off;
	acpi_size pg_sz;

	if (phys > ULONG_MAX) {
		pr_err("Cannot map memory that high: 0x%llx\n", phys);
		return NULL;
	}

	if (!acpi_permanent_mmap)
		return __acpi_map_table((unsigned long)phys, size);

	mutex_lock(&acpi_ioremap_lock);
	/* Reuse an existing mapping when one already covers the range. */
	map = acpi_map_lookup(phys, size);
	if (map) {
		map->track.refcount++;
		goto out;
	}

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		return NULL;
	}

	/* Record the mapping rounded out to whole pages. */
	pg_off = round_down(phys, PAGE_SIZE);
	pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
	virt = acpi_map(phys, size);
	if (!virt) {
		mutex_unlock(&acpi_ioremap_lock);
		kfree(map);
		return NULL;
	}

	INIT_LIST_HEAD(&map->list);
	map->virt = (void __iomem __force *)((unsigned long)virt & PAGE_MASK);
	map->phys = pg_off;
	map->size = pg_sz;
	map->track.refcount = 1;

	list_add_tail_rcu(&map->list, &acpi_ioremaps);

out:
	mutex_unlock(&acpi_ioremap_lock);
	return map->virt + (phys - map->phys);
}
EXPORT_SYMBOL_GPL(acpi_os_map_iomem);
0375
0376 void *__ref acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
0377 {
0378 return (void *)acpi_os_map_iomem(phys, size);
0379 }
0380 EXPORT_SYMBOL_GPL(acpi_os_map_memory);
0381
/* RCU work callback: tear down a mapping once all RCU readers are done. */
static void acpi_os_map_remove(struct work_struct *work)
{
	struct acpi_ioremap *map = container_of(to_rcu_work(work),
						struct acpi_ioremap,
						track.rwork);

	acpi_unmap(map->phys, map->virt);
	kfree(map);
}
0391
0392
/* Must be called with acpi_ioremap_lock held. */
static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
{
	if (--map->track.refcount)
		return;

	/*
	 * Last reference gone: unlink the entry and defer the actual
	 * unmap through RCU so lockless readers can finish with it.
	 * track.rwork reuses the storage of the (now dead) refcount.
	 */
	list_del_rcu(&map->list);

	INIT_RCU_WORK(&map->track.rwork, acpi_os_map_remove);
	queue_rcu_work(system_wq, &map->track.rwork);
}
0403
0404
0405
0406
0407
0408
0409
0410
0411
0412
0413
0414
0415
0416
0417
/**
 * acpi_os_unmap_iomem - Drop a memory mapping reference.
 * @virt: Start of the address range to drop a reference to.
 * @size: Size of the address range to drop a reference to.
 *
 * Look the virtual range up in the list of existing ACPI memory
 * mappings and drop a reference to it; the last reference queues the
 * mapping for deferred removal.  During early init this defers to
 * __acpi_unmap_table().  Warns on an address that was never mapped.
 */
void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
{
	struct acpi_ioremap *map;

	if (!acpi_permanent_mmap) {
		__acpi_unmap_table(virt, size);
		return;
	}

	mutex_lock(&acpi_ioremap_lock);

	map = acpi_map_lookup_virt(virt, size);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		WARN(true, "ACPI: %s: bad address %p\n", __func__, virt);
		return;
	}
	acpi_os_drop_map_ref(map);

	mutex_unlock(&acpi_ioremap_lock);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_iomem);
0440
0441
0442
0443
0444
0445
0446 void __ref acpi_os_unmap_memory(void *virt, acpi_size size)
0447 {
0448 acpi_os_unmap_iomem((void __iomem *)virt, size);
0449 }
0450 EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
0451
/*
 * Map the system-memory register described by a generic address
 * structure.  Returns NULL for non-memory address spaces, or when the
 * GAS has a zero address or zero bit width.
 */
void __iomem *acpi_os_map_generic_address(struct acpi_generic_address *gas)
{
	u64 addr;

	if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return NULL;

	/* Copy via memcpy: gas->address may be unaligned. */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !gas->bit_width)
		return NULL;

	return acpi_os_map_iomem(addr, gas->bit_width / 8);
}
EXPORT_SYMBOL(acpi_os_map_generic_address);
0467
/*
 * Drop the mapping reference taken by acpi_os_map_generic_address() for
 * the same generic address structure.  Silently does nothing for
 * non-memory spaces, zero addresses/widths, or unknown ranges.
 */
void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
{
	u64 addr;
	struct acpi_ioremap *map;

	if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return;

	/* Copy via memcpy: gas->address may be unaligned. */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !gas->bit_width)
		return;

	mutex_lock(&acpi_ioremap_lock);

	map = acpi_map_lookup(addr, gas->bit_width / 8);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		return;
	}
	acpi_os_drop_map_ref(map);

	mutex_unlock(&acpi_ioremap_lock);
}
EXPORT_SYMBOL(acpi_os_unmap_generic_address);
0493
#ifdef ACPI_FUTURE_USAGE
/*
 * Translate a kernel virtual address into its physical counterpart.
 * Returns AE_BAD_PARAMETER when either pointer is NULL.
 */
acpi_status
acpi_os_get_physical_address(void *virt, acpi_physical_address *phys)
{
	if (!virt || !phys)
		return AE_BAD_PARAMETER;

	*phys = virt_to_phys(virt);
	return AE_OK;
}
#endif
0506
#ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE
/* When set, _REV is overridden to 5 by acpi_os_predefined_override(). */
static bool acpi_rev_override;

int __init acpi_rev_override_setup(char *str)
{
	acpi_rev_override = true;
	return 1;
}
__setup("acpi_rev_override", acpi_rev_override_setup);
#else
#define acpi_rev_override	false
#endif
0519
#define ACPI_MAX_OVERRIDE_LEN 100

/* Replacement _OS string set via the "acpi_os_name=" parameter. */
static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];

/*
 * ACPICA callback: allow the OS to override predefined namespace objects
 * at table-load time.  Overrides _OS with acpi_os_name when set, and
 * _REV with 5 when acpi_rev_override is enabled.  NOTE(review): for
 * _REV the integer 5 is smuggled through the string pointer; ACPICA is
 * expected to interpret it for integer-typed objects — confirm against
 * the ACPICA predefined-override contract.
 */
acpi_status
acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
			    acpi_string *new_val)
{
	if (!init_val || !new_val)
		return AE_BAD_PARAMETER;

	*new_val = NULL;
	if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
		pr_info("Overriding _OS definition to '%s'\n", acpi_os_name);
		*new_val = acpi_os_name;
	}

	if (!memcmp(init_val->name, "_REV", 4) && acpi_rev_override) {
		pr_info("Overriding _REV return value to 5\n");
		*new_val = (char *)5;
	}

	return AE_OK;
}
0544
0545 static irqreturn_t acpi_irq(int irq, void *dev_id)
0546 {
0547 u32 handled;
0548
0549 handled = (*acpi_irq_handler) (acpi_irq_context);
0550
0551 if (handled) {
0552 acpi_irq_handled++;
0553 return IRQ_HANDLED;
0554 } else {
0555 acpi_irq_not_handled++;
0556 return IRQ_NONE;
0557 }
0558 }
0559
/*
 * ACPICA callback: install the SCI interrupt handler.  Only the FADT
 * SCI interrupt is supported, and only one handler at a time.  A
 * GSI-to-IRQ translation failure is logged but still returns AE_OK.
 */
acpi_status
acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
				  void *context)
{
	unsigned int irq;

	acpi_irq_stats_init();

	/*
	 * ACPI interrupts different from the SCI in our copy of the FADT
	 * are not supported.
	 */
	if (gsi != acpi_gbl_FADT.sci_interrupt)
		return AE_BAD_PARAMETER;

	if (acpi_irq_handler)
		return AE_ALREADY_ACQUIRED;

	if (acpi_gsi_to_irq(gsi, &irq) < 0) {
		pr_err("SCI (ACPI GSI %d) not registered\n", gsi);
		return AE_OK;
	}

	acpi_irq_handler = handler;
	acpi_irq_context = context;
	if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
		pr_err("SCI (IRQ%d) allocation failed\n", irq);
		acpi_irq_handler = NULL;
		return AE_NOT_ACQUIRED;
	}
	acpi_sci_irq = irq;

	return AE_OK;
}
0594
/*
 * ACPICA callback: remove the SCI handler installed by
 * acpi_os_install_interrupt_handler() and mark the SCI IRQ invalid.
 */
acpi_status acpi_os_remove_interrupt_handler(u32 gsi, acpi_osd_handler handler)
{
	if (gsi != acpi_gbl_FADT.sci_interrupt || !acpi_sci_irq_valid())
		return AE_BAD_PARAMETER;

	free_irq(acpi_sci_irq, acpi_irq);
	acpi_irq_handler = NULL;
	acpi_sci_irq = INVALID_ACPI_IRQ;

	return AE_OK;
}
0606
0607
0608
0609
0610
/*
 * ACPICA callback: sleep for the given number of milliseconds (process
 * context only).  NOTE(review): "ms" is u64 but msleep() takes an
 * unsigned int, so extremely large values would be truncated — confirm
 * callers never pass more than UINT_MAX.
 */
void acpi_os_sleep(u64 ms)
{
	msleep(ms);
}
0615
0616 void acpi_os_stall(u32 us)
0617 {
0618 while (us) {
0619 u32 delay = 1000;
0620
0621 if (delay > us)
0622 delay = us;
0623 udelay(delay);
0624 touch_nmi_watchdog();
0625 us -= delay;
0626 }
0627 }
0628
0629
0630
0631
0632
0633
0634
0635
0636
0637
/*
 * ACPICA callback: return a monotonically increasing timer value in
 * 100-nanosecond units, derived from jiffies (so resolution is 1/HZ).
 */
u64 acpi_os_get_timer(void)
{
	return (get_jiffies_64() - INITIAL_JIFFIES) *
		(ACPI_100NSEC_PER_SEC / HZ);
}
0643
/*
 * ACPICA callback: read from a legacy I/O port.  Widths up to 32 bits
 * are rounded up to the next supported access size (8, 16 or 32); wider
 * accesses fail with AE_BAD_PARAMETER.  A NULL "value" performs a dummy
 * read.  The output is pre-zeroed so narrow reads leave upper bytes 0.
 */
acpi_status acpi_os_read_port(acpi_io_address port, u32 *value, u32 width)
{
	u32 dummy;

	if (value)
		*value = 0;
	else
		value = &dummy;

	if (width <= 8) {
		*value = inb(port);
	} else if (width <= 16) {
		*value = inw(port);
	} else if (width <= 32) {
		*value = inl(port);
	} else {
		pr_debug("%s: Access width %d not supported\n", __func__, width);
		return AE_BAD_PARAMETER;
	}

	return AE_OK;
}

EXPORT_SYMBOL(acpi_os_read_port);
0668
0669 acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
0670 {
0671 if (width <= 8) {
0672 outb(value, port);
0673 } else if (width <= 16) {
0674 outw(value, port);
0675 } else if (width <= 32) {
0676 outl(value, port);
0677 } else {
0678 pr_debug("%s: Access width %d not supported\n", __func__, width);
0679 return AE_BAD_PARAMETER;
0680 }
0681
0682 return AE_OK;
0683 }
0684
0685 EXPORT_SYMBOL(acpi_os_write_port);
0686
/*
 * Read a naturally-sized value from an already-mapped MMIO address.
 * Only exact widths of 8/16/32/64 are accepted; returns -EINVAL
 * otherwise.  Note that for narrow widths only the low "width" bits of
 * *value are written (the store goes through a narrowed pointer).
 */
int acpi_os_read_iomem(void __iomem *virt_addr, u64 *value, u32 width)
{
	switch (width) {
	case 8:
		*(u8 *) value = readb(virt_addr);
		break;
	case 16:
		*(u16 *) value = readw(virt_addr);
		break;
	case 32:
		*(u32 *) value = readl(virt_addr);
		break;
	case 64:
		*(u64 *) value = readq(virt_addr);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
0709
/*
 * ACPICA callback: read from physical memory.  Uses a cached mapping
 * under RCU when one covers the range; otherwise creates (and then
 * tears down) a transient ioremap.  A NULL "value" performs a dummy
 * read.  Width must be 8/16/32/64 — any other value trips the BUG_ON
 * via acpi_os_read_iomem()'s error return.
 */
acpi_status
acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;
	u64 dummy;
	int error;

	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		/* No cached mapping: fall back to a transient one. */
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}

	if (!value)
		value = &dummy;

	error = acpi_os_read_iomem(virt_addr, value, width);
	BUG_ON(error);

	if (unmap)
		iounmap(virt_addr);
	else
		rcu_read_unlock();

	return AE_OK;
}
0742
/*
 * ACPICA callback: write to physical memory.  Mirrors
 * acpi_os_read_memory(): cached mapping under RCU when available,
 * transient ioremap otherwise.  Width must be 8/16/32/64 (BUG()
 * otherwise).
 */
acpi_status
acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;

	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		/* No cached mapping: fall back to a transient one. */
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}

	switch (width) {
	case 8:
		writeb(value, virt_addr);
		break;
	case 16:
		writew(value, virt_addr);
		break;
	case 32:
		writel(value, virt_addr);
		break;
	case 64:
		writeq(value, virt_addr);
		break;
	default:
		BUG();
	}

	if (unmap)
		iounmap(virt_addr);
	else
		rcu_read_unlock();

	return AE_OK;
}
0784
#ifdef CONFIG_PCI
/*
 * ACPICA callback: read PCI configuration space.  Width must be 8, 16
 * or 32 bits; the result is zero-extended into *value.
 */
acpi_status
acpi_os_read_pci_configuration(struct acpi_pci_id *pci_id, u32 reg,
			       u64 *value, u32 width)
{
	int result, size;
	u32 value32;

	if (!value)
		return AE_BAD_PARAMETER;

	switch (width) {
	case 8:
		size = 1;
		break;
	case 16:
		size = 2;
		break;
	case 32:
		size = 4;
		break;
	default:
		return AE_ERROR;
	}

	result = raw_pci_read(pci_id->segment, pci_id->bus,
			      PCI_DEVFN(pci_id->device, pci_id->function),
			      reg, size, &value32);
	*value = value32;

	return (result ? AE_ERROR : AE_OK);
}
0817
/*
 * ACPICA callback: write PCI configuration space.  Width must be 8, 16
 * or 32 bits.
 */
acpi_status
acpi_os_write_pci_configuration(struct acpi_pci_id *pci_id, u32 reg,
				u64 value, u32 width)
{
	int result, size;

	switch (width) {
	case 8:
		size = 1;
		break;
	case 16:
		size = 2;
		break;
	case 32:
		size = 4;
		break;
	default:
		return AE_ERROR;
	}

	result = raw_pci_write(pci_id->segment, pci_id->bus,
			       PCI_DEVFN(pci_id->device, pci_id->function),
			       reg, size, value);

	return (result ? AE_ERROR : AE_OK);
}
#endif
0845
0846 static void acpi_os_execute_deferred(struct work_struct *work)
0847 {
0848 struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
0849
0850 dpc->function(dpc->context);
0851 kfree(dpc);
0852 }
0853
#ifdef CONFIG_ACPI_DEBUGGER
/* The single registered debugger backend and its owning module. */
static struct acpi_debugger acpi_debugger;
static bool acpi_debugger_initialized;

/*
 * Register a debugger backend.  Only one backend may be registered at a
 * time; returns -EBUSY when one already is.
 */
int acpi_register_debugger(struct module *owner,
			   const struct acpi_debugger_ops *ops)
{
	int ret = 0;

	mutex_lock(&acpi_debugger.lock);
	if (acpi_debugger.ops) {
		ret = -EBUSY;
		goto err_lock;
	}

	acpi_debugger.owner = owner;
	acpi_debugger.ops = ops;

err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}
EXPORT_SYMBOL(acpi_register_debugger);
0877
0878 void acpi_unregister_debugger(const struct acpi_debugger_ops *ops)
0879 {
0880 mutex_lock(&acpi_debugger.lock);
0881 if (ops == acpi_debugger.ops) {
0882 acpi_debugger.ops = NULL;
0883 acpi_debugger.owner = NULL;
0884 }
0885 mutex_unlock(&acpi_debugger.lock);
0886 }
0887 EXPORT_SYMBOL(acpi_unregister_debugger);
0888
/*
 * Ask the registered debugger backend to spawn a thread running
 * function(context).  Takes a module reference across the (unlocked)
 * backend call so the backend cannot be unloaded mid-call.  Returns
 * -ENODEV when no backend is available.
 */
int acpi_debugger_create_thread(acpi_osd_exec_callback function, void *context)
{
	int ret;
	int (*func)(acpi_osd_exec_callback, void *);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->create_thread;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	/* Call the backend without holding the registration lock. */
	ret = func(function, context);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}
0918
/*
 * Forward a log message to the registered debugger backend.  Same
 * locking and module-reference pattern as acpi_debugger_create_thread().
 * Returns -ENODEV when no backend is available.
 */
ssize_t acpi_debugger_write_log(const char *msg)
{
	ssize_t ret;
	ssize_t (*func)(const char *);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->write_log;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	/* Call the backend without holding the registration lock. */
	ret = func(msg);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}
0948
/*
 * Read a debugger command line from the registered backend into
 * "buffer".  Same locking and module-reference pattern as
 * acpi_debugger_create_thread().  Returns -ENODEV without a backend.
 */
ssize_t acpi_debugger_read_cmd(char *buffer, size_t buffer_length)
{
	ssize_t ret;
	ssize_t (*func)(char *, size_t);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->read_cmd;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	/* Call the backend without holding the registration lock. */
	ret = func(buffer, buffer_length);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}
0978
/*
 * Block until the registered debugger backend has a command ready,
 * passing it ACPICA's execution state and line buffer.  Same locking
 * and module-reference pattern as acpi_debugger_create_thread().
 */
int acpi_debugger_wait_command_ready(void)
{
	int ret;
	int (*func)(bool, char *, size_t);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->wait_command_ready;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	/* Call the backend without holding the registration lock. */
	ret = func(acpi_gbl_method_executing,
		   acpi_gbl_db_line_buf, ACPI_DB_LINE_BUFFER_SIZE);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}
1009
/*
 * Tell the registered debugger backend that the current command has
 * finished executing.  Same locking and module-reference pattern as
 * acpi_debugger_create_thread().
 */
int acpi_debugger_notify_command_complete(void)
{
	int ret;
	int (*func)(void);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->notify_command_complete;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	/* Call the backend without holding the registration lock. */
	ret = func();

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}
1039
/* One-time init: set up the registration lock and enable the helpers. */
int __init acpi_debugger_init(void)
{
	mutex_init(&acpi_debugger.lock);
	acpi_debugger_initialized = true;
	return 0;
}
#endif
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
/*
 * ACPICA callback: run "function(context)" asynchronously.  Debugger
 * requests are handed to the debugger thread; GPE and Notify requests
 * are queued (always on CPU 0, see below) to their dedicated
 * workqueues.  Unknown types fail with AE_ERROR.
 */
acpi_status acpi_os_execute(acpi_execute_type type,
			    acpi_osd_exec_callback function, void *context)
{
	acpi_status status = AE_OK;
	struct acpi_os_dpc *dpc;
	struct workqueue_struct *queue;
	int ret;
	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
			  "Scheduling function [%p(%p)] for deferred execution.\n",
			  function, context));

	if (type == OSL_DEBUGGER_MAIN_THREAD) {
		ret = acpi_debugger_create_thread(function, context);
		if (ret) {
			pr_err("Kernel thread creation failed\n");
			status = AE_ERROR;
		}
		goto out_thread;
	}

	/*
	 * Allocate/initialize DPC structure.  Note that this memory will
	 * be freed by the callee (acpi_os_execute_deferred()).  A static
	 * work_struct cannot be used because several tasks with different
	 * parameters may be scheduled concurrently.  GFP_ATOMIC: this may
	 * be called from interrupt context.
	 */
	dpc = kzalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
	if (!dpc)
		return AE_NO_MEMORY;

	dpc->function = function;
	dpc->context = context;

	/*
	 * INIT_WORK() is invoked separately per branch so that each
	 * workqueue gets its own static lockdep key, keeping lockdep from
	 * complaining about cross-queue dependencies.
	 */
	if (type == OSL_NOTIFY_HANDLER) {
		queue = kacpi_notify_wq;
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
	} else if (type == OSL_GPE_HANDLER) {
		queue = kacpid_wq;
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
	} else {
		pr_err("Unsupported os_execute type %d.\n", type);
		status = AE_ERROR;
	}

	if (ACPI_FAILURE(status))
		goto err_workqueue;

	/*
	 * Queue on CPU 0 specifically: on some machines AML triggers
	 * software SMIs that are only safe when initiated from CPU 0, so
	 * keeping all deferred AML execution there avoids known
	 * corruption cases.
	 */
	ret = queue_work_on(0, queue, &dpc->work);
	if (!ret) {
		pr_err("Unable to queue work\n");
		status = AE_ERROR;
	}
err_workqueue:
	if (ACPI_FAILURE(status))
		kfree(dpc);
out_thread:
	return status;
}
EXPORT_SYMBOL(acpi_os_execute);
1137
/*
 * Wait until all in-flight SCI handling and all queued GPE/Notify work
 * items have finished, so that callers (e.g. hotplug) do not race with
 * deferred ACPI event processing.
 */
void acpi_os_wait_events_complete(void)
{
	/*
	 * Make sure the GPE handler or the fixed event handler is not used
	 * on another CPU after removal.
	 */
	if (acpi_sci_irq_valid())
		synchronize_hardirq(acpi_sci_irq);
	flush_workqueue(kacpid_wq);
	flush_workqueue(kacpi_notify_wq);
}
EXPORT_SYMBOL(acpi_os_wait_events_complete);
1150
/* One deferred hotplug request queued to kacpi_hotplug_wq. */
struct acpi_hp_work {
	struct work_struct work;
	struct acpi_device *adev;	/* device the event is for */
	u32 src;			/* event source/type code */
};
1156
1157 static void acpi_hotplug_work_fn(struct work_struct *work)
1158 {
1159 struct acpi_hp_work *hpw = container_of(work, struct acpi_hp_work, work);
1160
1161 acpi_os_wait_events_complete();
1162 acpi_device_hotplug(hpw->adev, hpw->src);
1163 kfree(hpw);
1164 }
1165
/*
 * Queue a hotplug event for the given device on the dedicated hotplug
 * workqueue.  The work item (freed by acpi_hotplug_work_fn()) first
 * waits for outstanding ACPI events, then calls the hotplug core.
 */
acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src)
{
	struct acpi_hp_work *hpw;

	acpi_handle_debug(adev->handle,
			  "Scheduling hotplug event %u for deferred handling\n",
			  src);

	hpw = kmalloc(sizeof(*hpw), GFP_KERNEL);
	if (!hpw)
		return AE_NO_MEMORY;

	INIT_WORK(&hpw->work, acpi_hotplug_work_fn);
	hpw->adev = adev;
	hpw->src = src;

	/*
	 * A dedicated queue is used because hotplug may call driver
	 * .remove() functions that flush the GPE/Notify workqueues
	 * (via acpi_os_wait_events_complete()); running hotplug on those
	 * queues would deadlock.
	 */
	if (!queue_work(kacpi_hotplug_wq, &hpw->work)) {
		kfree(hpw);
		return AE_ERROR;
	}
	return AE_OK;
}
1193
/* Queue arbitrary work on the dedicated hotplug workqueue. */
bool acpi_queue_hotplug_work(struct work_struct *work)
{
	return queue_work(kacpi_hotplug_wq, work);
}
1198
/*
 * ACPICA callback: create a counting semaphore.  "max_units" is unused;
 * the semaphore starts at "initial_units".  The returned handle is the
 * semaphore pointer itself.
 */
acpi_status
acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle *handle)
{
	struct semaphore *sem = NULL;

	sem = acpi_os_allocate_zeroed(sizeof(struct semaphore));
	if (!sem)
		return AE_NO_MEMORY;

	sema_init(sem, initial_units);

	*handle = (acpi_handle *) sem;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
			  *handle, initial_units));

	return AE_OK;
}
1217
1218
1219
1220
1221
1222
1223
1224
/*
 * ACPICA callback: destroy a semaphore created by
 * acpi_os_create_semaphore().  The semaphore must have no waiters
 * (BUG_ON otherwise) since freeing it under them would corrupt memory.
 */
acpi_status acpi_os_delete_semaphore(acpi_handle handle)
{
	struct semaphore *sem = (struct semaphore *)handle;

	if (!sem)
		return AE_BAD_PARAMETER;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));

	BUG_ON(!list_empty(&sem->wait_list));
	kfree(sem);
	sem = NULL;

	return AE_OK;
}
1240
1241
1242
1243
/*
 * ACPICA callback: acquire one unit of a semaphore, waiting up to
 * "timeout" milliseconds (ACPI_WAIT_FOREVER waits indefinitely).
 * Returns AE_TIME on timeout and AE_SUPPORT for units > 1.  Calls made
 * before the OS layer is initialized succeed trivially.
 */
acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
{
	acpi_status status = AE_OK;
	struct semaphore *sem = (struct semaphore *)handle;
	long jiffies;
	int ret = 0;

	if (!acpi_os_initialized)
		return AE_OK;

	if (!sem || (units < 1))
		return AE_BAD_PARAMETER;

	/* Only single-unit acquisition is implemented. */
	if (units > 1)
		return AE_SUPPORT;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
			  handle, units, timeout));

	if (timeout == ACPI_WAIT_FOREVER)
		jiffies = MAX_SCHEDULE_TIMEOUT;
	else
		jiffies = msecs_to_jiffies(timeout);

	ret = down_timeout(sem, jiffies);
	if (ret)
		status = AE_TIME;

	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Failed to acquire semaphore[%p|%d|%d], %s",
				  handle, units, timeout,
				  acpi_format_exception(status)));
	} else {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Acquired semaphore[%p|%d|%d]", handle,
				  units, timeout));
	}

	return status;
}
1285
1286
1287
1288
/*
 * ACPICA callback: release one unit of a semaphore.  AE_SUPPORT for
 * units > 1.  Calls made before the OS layer is initialized succeed
 * trivially.
 */
acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
{
	struct semaphore *sem = (struct semaphore *)handle;

	if (!acpi_os_initialized)
		return AE_OK;

	if (!sem || (units < 1))
		return AE_BAD_PARAMETER;

	/* Only single-unit release is implemented. */
	if (units > 1)
		return AE_SUPPORT;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
			  units));

	up(sem);

	return AE_OK;
}
1309
1310 acpi_status acpi_os_get_line(char *buffer, u32 buffer_length, u32 *bytes_read)
1311 {
1312 #ifdef ENABLE_DEBUGGER
1313 if (acpi_in_debugger) {
1314 u32 chars;
1315
1316 kdb_read(buffer, buffer_length);
1317
1318
1319 chars = strlen(buffer) - 1;
1320 buffer[chars] = '\0';
1321 }
1322 #else
1323 int ret;
1324
1325 ret = acpi_debugger_read_cmd(buffer, buffer_length);
1326 if (ret < 0)
1327 return AE_ERROR;
1328 if (bytes_read)
1329 *bytes_read = ret;
1330 #endif
1331
1332 return AE_OK;
1333 }
1334 EXPORT_SYMBOL(acpi_os_get_line);
1335
1336 acpi_status acpi_os_wait_command_ready(void)
1337 {
1338 int ret;
1339
1340 ret = acpi_debugger_wait_command_ready();
1341 if (ret < 0)
1342 return AE_ERROR;
1343 return AE_OK;
1344 }
1345
1346 acpi_status acpi_os_notify_command_complete(void)
1347 {
1348 int ret;
1349
1350 ret = acpi_debugger_notify_command_complete();
1351 if (ret < 0)
1352 return AE_ERROR;
1353 return AE_OK;
1354 }
1355
/*
 * ACPICA callback for AML Fatal and Breakpoint opcodes.  Fatal opcodes
 * are only logged; breakpoints are treated as NOPs.
 */
acpi_status acpi_os_signal(u32 function, void *info)
{
	switch (function) {
	case ACPI_SIGNAL_FATAL:
		pr_err("Fatal opcode executed\n");
		break;
	case ACPI_SIGNAL_BREAKPOINT:
		/*
		 * AML Breakpoint: the ACPI spec says to treat it as a NOP
		 * unless you are debugging, so a debugger hook would go
		 * here.  Until one exists, do nothing.
		 */
		break;
	default:
		break;
	}

	return AE_OK;
}
1378
/*
 * Parse "acpi_os_name=<string>" into acpi_os_name (used to override the
 * _OS object, see acpi_os_predefined_override()).  Only alphanumerics,
 * spaces and ':' are copied; quote characters are skipped; any other
 * character ends the copy.
 */
static int __init acpi_os_name_setup(char *str)
{
	char *p = acpi_os_name;
	int count = ACPI_MAX_OVERRIDE_LEN - 1;	/* leave room for the NUL */

	if (!str || !*str)
		return 0;

	for (; count-- && *str; str++) {
		if (isalnum(*str) || *str == ' ' || *str == ':')
			*p++ = *str;
		else if (*str == '\'' || *str == '"')
			continue;
		else
			break;
	}
	*p = 0;

	return 1;

}

__setup("acpi_os_name=", acpi_os_name_setup);
1402
1403
1404
1405
1406
1407
1408
/*
 * "acpi_no_auto_serialize": clear ACPICA's auto-serialization flag
 * (acpi_gbl_auto_serialize_methods).
 */
static int __init acpi_no_auto_serialize_setup(char *str)
{
	acpi_gbl_auto_serialize_methods = FALSE;
	pr_info("Auto-serialization disabled\n");

	return 1;
}

__setup("acpi_no_auto_serialize", acpi_no_auto_serialize_setup);
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
/*
 * Policy for conflicts between ACPI-declared address ranges and
 * driver-claimed resources, selected with "acpi_enforce_resources=":
 *   strict (default) - refuse the conflicting driver request
 *   lax              - warn but allow it
 *   no               - do not check at all
 */
#define ENFORCE_RESOURCES_STRICT 2
#define ENFORCE_RESOURCES_LAX    1
#define ENFORCE_RESOURCES_NO     0

static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;

static int __init acpi_enforce_resources_setup(char *str)
{
	if (str == NULL || *str == '\0')
		return 0;

	if (!strcmp("strict", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
	else if (!strcmp("lax", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
	else if (!strcmp("no", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_NO;

	return 1;
}

__setup("acpi_enforce_resources=", acpi_enforce_resources_setup);
1457
1458
1459
1460 int acpi_check_resource_conflict(const struct resource *res)
1461 {
1462 acpi_adr_space_type space_id;
1463
1464 if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
1465 return 0;
1466
1467 if (res->flags & IORESOURCE_IO)
1468 space_id = ACPI_ADR_SPACE_SYSTEM_IO;
1469 else if (res->flags & IORESOURCE_MEM)
1470 space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;
1471 else
1472 return 0;
1473
1474 if (!acpi_check_address_range(space_id, res->start, resource_size(res), 1))
1475 return 0;
1476
1477 pr_info("Resource conflict; ACPI support missing from driver?\n");
1478
1479 if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
1480 return -EBUSY;
1481
1482 if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
1483 pr_notice("Resource conflict: System may be unstable or behave erratically\n");
1484
1485 return 0;
1486 }
1487 EXPORT_SYMBOL(acpi_check_resource_conflict);
1488
1489 int acpi_check_region(resource_size_t start, resource_size_t n,
1490 const char *name)
1491 {
1492 struct resource res = DEFINE_RES_IO_NAMED(start, n, name);
1493
1494 return acpi_check_resource_conflict(&res);
1495 }
1496 EXPORT_SYMBOL(acpi_check_region);
1497
1498
1499
1500
1501 int acpi_resources_are_enforced(void)
1502 {
1503 return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT;
1504 }
1505 EXPORT_SYMBOL(acpi_resources_are_enforced);
1506
1507
1508
1509
/*
 * acpi_os_delete_lock - Free the storage backing an ACPICA spinlock.
 * @handle: lock object (presumably allocated by acpi_os_create_lock(),
 *          which is not visible in this part of the file).
 *
 * Only the memory is reclaimed; the lock must no longer be in use.
 */
void acpi_os_delete_lock(acpi_spinlock handle)
{
	ACPI_FREE(handle);
}
1514
1515
1516
1517
1518
1519
1520
/*
 * acpi_os_acquire_lock - Take an ACPICA spinlock with local IRQs disabled.
 * @lockp: the spinlock to acquire.
 *
 * Returns the saved interrupt flags; the caller must hand them back to
 * acpi_os_release_lock() to restore the previous IRQ state.  The
 * __acquires() annotation is for sparse lock-context checking.
 */
acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
	__acquires(lockp)
{
	acpi_cpu_flags flags;
	spin_lock_irqsave(lockp, flags);
	return flags;
}
1528
1529
1530
1531
1532
/*
 * acpi_os_release_lock - Release an ACPICA spinlock and restore IRQ state.
 * @lockp: the spinlock to release.
 * @flags: interrupt flags returned by the matching acpi_os_acquire_lock().
 */
void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
	__releases(lockp)
{
	spin_unlock_irqrestore(lockp, flags);
}
1538
1539 #ifndef ACPI_USE_LOCAL_CACHE
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556 acpi_status
1557 acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
1558 {
1559 *cache = kmem_cache_create(name, size, 0, 0, NULL);
1560 if (*cache == NULL)
1561 return AE_ERROR;
1562 else
1563 return AE_OK;
1564 }
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
/*
 * acpi_os_purge_cache - Release free memory held by an ACPICA object cache.
 * @cache: the cache to shrink.
 *
 * Delegates to kmem_cache_shrink(); objects still in use are unaffected.
 * Always returns AE_OK.
 */
acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
{
	kmem_cache_shrink(cache);
	return (AE_OK);
}
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
/*
 * acpi_os_delete_cache - Destroy an ACPICA object cache entirely.
 * @cache: the cache to destroy.
 *
 * All objects must already have been released back to the cache.
 * Always returns AE_OK.
 */
acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
{
	kmem_cache_destroy(cache);
	return (AE_OK);
}
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
/*
 * acpi_os_release_object - Return an object to its ACPICA cache.
 * @cache: the cache the object was allocated from.
 * @object: the object to release.
 *
 * Always returns AE_OK.
 */
acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
{
	kmem_cache_free(cache, object);
	return (AE_OK);
}
1622 #endif
1623
1624 static int __init acpi_no_static_ssdt_setup(char *s)
1625 {
1626 acpi_gbl_disable_ssdt_table_install = TRUE;
1627 pr_info("Static SSDT installation disabled\n");
1628
1629 return 0;
1630 }
1631
1632 early_param("acpi_no_static_ssdt", acpi_no_static_ssdt_setup);
1633
1634 static int __init acpi_disable_return_repair(char *s)
1635 {
1636 pr_notice("Predefined validation mechanism disabled\n");
1637 acpi_gbl_disable_auto_repair = TRUE;
1638
1639 return 1;
1640 }
1641
1642 __setup("acpica_no_return_repair", acpi_disable_return_repair);
1643
/*
 * acpi_os_initialize - Map the FADT fixed-hardware register blocks.
 *
 * Maps the PM1a/PM1b event blocks and the GPE0/GPE1 blocks, recording the
 * GPE virtual addresses in ACPICA globals so the interpreter can reach
 * them directly.  The reset register is mapped as well when the FADT
 * advertises one.  Sets acpi_os_initialized on completion and always
 * returns AE_OK.  The teardown counterpart is acpi_os_terminate().
 */
acpi_status __init acpi_os_initialize(void)
{
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block);

	acpi_gbl_xgpe0_block_logical_address =
	    (unsigned long)acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
	acpi_gbl_xgpe1_block_logical_address =
	    (unsigned long)acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);

	if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) {
		/*
		 * Reset support is optional, so a failed mapping is only
		 * logged at debug level rather than treated as an error.
		 */
		void *rv;

		rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register);
		pr_debug("%s: Reset register mapping %s\n", __func__,
			 rv ? "successful" : "failed");
	}
	acpi_os_initialized = true;

	return AE_OK;
}
1669
/*
 * acpi_os_initialize1 - Second-stage OSL init: create the ACPI workqueues.
 *
 * Creates the general (kacpid), notify and ordered hotplug workqueues and
 * initializes the _OSI handling.  Workqueue creation failure this early is
 * unrecoverable, hence BUG_ON.  Always returns AE_OK.
 */
acpi_status __init acpi_os_initialize1(void)
{
	kacpid_wq = alloc_workqueue("kacpid", 0, 1);
	kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
	kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
	BUG_ON(!kacpid_wq);
	BUG_ON(!kacpi_notify_wq);
	BUG_ON(!kacpi_hotplug_wq);
	acpi_osi_init();
	return AE_OK;
}
1681
/*
 * acpi_os_terminate - Tear down OSL state set up during initialization.
 *
 * Removes the SCI interrupt handler if one was installed, unmaps the
 * fixed-hardware register blocks in the reverse order of their mapping in
 * acpi_os_initialize() (clearing the GPE address globals), and destroys
 * the workqueues created by acpi_os_initialize1().  Always returns AE_OK.
 */
acpi_status acpi_os_terminate(void)
{
	if (acpi_irq_handler) {
		acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt,
						 acpi_irq_handler);
	}

	/* Unmap in reverse order of acpi_os_initialize(). */
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block);
	acpi_gbl_xgpe0_block_logical_address = 0UL;
	acpi_gbl_xgpe1_block_logical_address = 0UL;

	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block);

	if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER)
		acpi_os_unmap_generic_address(&acpi_gbl_FADT.reset_register);

	destroy_workqueue(kacpid_wq);
	destroy_workqueue(kacpi_notify_wq);
	destroy_workqueue(kacpi_hotplug_wq);

	return AE_OK;
}
1706
1707 acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control,
1708 u32 pm1b_control)
1709 {
1710 int rc = 0;
1711 if (__acpi_os_prepare_sleep)
1712 rc = __acpi_os_prepare_sleep(sleep_state,
1713 pm1a_control, pm1b_control);
1714 if (rc < 0)
1715 return AE_ERROR;
1716 else if (rc > 0)
1717 return AE_CTRL_TERMINATE;
1718
1719 return AE_OK;
1720 }
1721
/*
 * acpi_os_set_prepare_sleep - Register the pre-sleep callback.
 * @func: hook invoked from acpi_os_prepare_sleep(), or NULL to clear it.
 */
void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
				u32 pm1a_ctrl, u32 pm1b_ctrl))
{
	__acpi_os_prepare_sleep = func;
}
1727
1728 #if (ACPI_REDUCED_HARDWARE)
1729 acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
1730 u32 val_b)
1731 {
1732 int rc = 0;
1733 if (__acpi_os_prepare_extended_sleep)
1734 rc = __acpi_os_prepare_extended_sleep(sleep_state,
1735 val_a, val_b);
1736 if (rc < 0)
1737 return AE_ERROR;
1738 else if (rc > 0)
1739 return AE_CTRL_TERMINATE;
1740
1741 return AE_OK;
1742 }
1743 #else
1744 acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
1745 u32 val_b)
1746 {
1747 return AE_OK;
1748 }
1749 #endif
1750
/*
 * acpi_os_set_prepare_extended_sleep - Register the extended pre-sleep hook.
 * @func: callback invoked from acpi_os_prepare_extended_sleep(), or NULL
 *        to clear it.
 */
void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
				u32 val_a, u32 val_b))
{
	__acpi_os_prepare_extended_sleep = func;
}
1756
1757 acpi_status acpi_os_enter_sleep(u8 sleep_state,
1758 u32 reg_a_value, u32 reg_b_value)
1759 {
1760 acpi_status status;
1761
1762 if (acpi_gbl_reduced_hardware)
1763 status = acpi_os_prepare_extended_sleep(sleep_state,
1764 reg_a_value,
1765 reg_b_value);
1766 else
1767 status = acpi_os_prepare_sleep(sleep_state,
1768 reg_a_value, reg_b_value);
1769 return status;
1770 }