0001
0002
0003
0004
0005
0006
0007
0008
0009
0010 #include <acpi/acpi.h>
0011 #include "accommon.h"
0012 #include "acevents.h"
0013 #include "acnamesp.h"
0014
0015 #define _COMPONENT ACPI_EVENTS
0016 ACPI_MODULE_NAME("evgpe")
0017 #if (!ACPI_REDUCED_HARDWARE)
0018
0019 static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context);
0020
0021 static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context);
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036 acpi_status
0037 acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info)
0038 {
0039 struct acpi_gpe_register_info *gpe_register_info;
0040 u32 register_bit;
0041
0042 ACPI_FUNCTION_TRACE(ev_update_gpe_enable_mask);
0043
0044 gpe_register_info = gpe_event_info->register_info;
0045 if (!gpe_register_info) {
0046 return_ACPI_STATUS(AE_NOT_EXIST);
0047 }
0048
0049 register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);
0050
0051
0052
0053 ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit);
0054
0055
0056
0057 if (gpe_event_info->runtime_count) {
0058 ACPI_SET_BIT(gpe_register_info->enable_for_run,
0059 (u8)register_bit);
0060 }
0061
0062 gpe_register_info->enable_mask = gpe_register_info->enable_for_run;
0063 return_ACPI_STATUS(AE_OK);
0064 }
0065
0066
0067
0068
0069
0070
0071
0072
0073
0074
0075
0076
0077
0078 acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
0079 {
0080 acpi_status status;
0081
0082 ACPI_FUNCTION_TRACE(ev_enable_gpe);
0083
0084
0085
0086 status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
0087 return_ACPI_STATUS(status);
0088 }
0089
0090
0091
0092
0093
0094
0095
0096
0097
0098
0099
0100
0101
0102
0103 acpi_status
0104 acpi_ev_mask_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 is_masked)
0105 {
0106 struct acpi_gpe_register_info *gpe_register_info;
0107 u32 register_bit;
0108
0109 ACPI_FUNCTION_TRACE(ev_mask_gpe);
0110
0111 gpe_register_info = gpe_event_info->register_info;
0112 if (!gpe_register_info) {
0113 return_ACPI_STATUS(AE_NOT_EXIST);
0114 }
0115
0116 register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);
0117
0118
0119
0120 if (is_masked) {
0121 if (register_bit & gpe_register_info->mask_for_run) {
0122 return_ACPI_STATUS(AE_BAD_PARAMETER);
0123 }
0124
0125 (void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
0126 ACPI_SET_BIT(gpe_register_info->mask_for_run, (u8)register_bit);
0127 } else {
0128 if (!(register_bit & gpe_register_info->mask_for_run)) {
0129 return_ACPI_STATUS(AE_BAD_PARAMETER);
0130 }
0131
0132 ACPI_CLEAR_BIT(gpe_register_info->mask_for_run,
0133 (u8)register_bit);
0134 if (gpe_event_info->runtime_count
0135 && !gpe_event_info->disable_for_dispatch) {
0136 (void)acpi_hw_low_set_gpe(gpe_event_info,
0137 ACPI_GPE_ENABLE);
0138 }
0139 }
0140
0141 return_ACPI_STATUS(AE_OK);
0142 }
0143
0144
0145
0146
0147
0148
0149
0150
0151
0152
0153
0154
0155
0156
0157
0158 acpi_status
0159 acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info,
0160 u8 clear_on_enable)
0161 {
0162 acpi_status status = AE_OK;
0163
0164 ACPI_FUNCTION_TRACE(ev_add_gpe_reference);
0165
0166 if (gpe_event_info->runtime_count == ACPI_UINT8_MAX) {
0167 return_ACPI_STATUS(AE_LIMIT);
0168 }
0169
0170 gpe_event_info->runtime_count++;
0171 if (gpe_event_info->runtime_count == 1) {
0172
0173
0174
0175 if (clear_on_enable) {
0176 (void)acpi_hw_clear_gpe(gpe_event_info);
0177 }
0178
0179 status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
0180 if (ACPI_SUCCESS(status)) {
0181 status = acpi_ev_enable_gpe(gpe_event_info);
0182 }
0183
0184 if (ACPI_FAILURE(status)) {
0185 gpe_event_info->runtime_count--;
0186 }
0187 }
0188
0189 return_ACPI_STATUS(status);
0190 }
0191
0192
0193
0194
0195
0196
0197
0198
0199
0200
0201
0202
0203
0204
0205 acpi_status
0206 acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
0207 {
0208 acpi_status status = AE_OK;
0209
0210 ACPI_FUNCTION_TRACE(ev_remove_gpe_reference);
0211
0212 if (!gpe_event_info->runtime_count) {
0213 return_ACPI_STATUS(AE_LIMIT);
0214 }
0215
0216 gpe_event_info->runtime_count--;
0217 if (!gpe_event_info->runtime_count) {
0218
0219
0220
0221 status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
0222 if (ACPI_SUCCESS(status)) {
0223 status =
0224 acpi_hw_low_set_gpe(gpe_event_info,
0225 ACPI_GPE_DISABLE);
0226 }
0227
0228 if (ACPI_FAILURE(status)) {
0229 gpe_event_info->runtime_count++;
0230 }
0231 }
0232
0233 return_ACPI_STATUS(status);
0234 }
0235
0236
0237
0238
0239
0240
0241
0242
0243
0244
0245
0246
0247
0248
0249
0250
0251 struct acpi_gpe_event_info *acpi_ev_low_get_gpe_info(u32 gpe_number,
0252 struct acpi_gpe_block_info
0253 *gpe_block)
0254 {
0255 u32 gpe_index;
0256
0257
0258
0259
0260
0261 if (!gpe_block || (gpe_number < gpe_block->block_base_number)) {
0262 return (NULL);
0263 }
0264
0265 gpe_index = gpe_number - gpe_block->block_base_number;
0266 if (gpe_index >= gpe_block->gpe_count) {
0267 return (NULL);
0268 }
0269
0270 return (&gpe_block->event_info[gpe_index]);
0271 }
0272
0273
0274
0275
0276
0277
0278
0279
0280
0281
0282
0283
0284
0285
0286
0287
0288
0289
0290
0291 struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
0292 u32 gpe_number)
0293 {
0294 union acpi_operand_object *obj_desc;
0295 struct acpi_gpe_event_info *gpe_info;
0296 u32 i;
0297
0298 ACPI_FUNCTION_ENTRY();
0299
0300
0301
0302 if (!gpe_device) {
0303
0304
0305
0306 for (i = 0; i < ACPI_MAX_GPE_BLOCKS; i++) {
0307 gpe_info = acpi_ev_low_get_gpe_info(gpe_number,
0308 acpi_gbl_gpe_fadt_blocks
0309 [i]);
0310 if (gpe_info) {
0311 return (gpe_info);
0312 }
0313 }
0314
0315
0316
0317 return (NULL);
0318 }
0319
0320
0321
0322 obj_desc =
0323 acpi_ns_get_attached_object((struct acpi_namespace_node *)
0324 gpe_device);
0325 if (!obj_desc || !obj_desc->device.gpe_block) {
0326 return (NULL);
0327 }
0328
0329 return (acpi_ev_low_get_gpe_info
0330 (gpe_number, obj_desc->device.gpe_block));
0331 }
0332
0333
0334
0335
0336
0337
0338
0339
0340
0341
0342
0343
0344
0345
0346
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_gpe_detect
 *
 * PARAMETERS:  gpe_xrupt_list      - Interrupt block for this interrupt.
 *                                    Can have multiple GPE blocks attached.
 *
 * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
 *
 * DESCRIPTION: Detect if any GP events have occurred. Walks every GPE block
 *              attached to this interrupt level and every register within
 *              each block, dispatching each possible GPE bit via
 *              acpi_ev_detect_gpe().
 *
 ******************************************************************************/

u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list)
{
	struct acpi_gpe_block_info *gpe_block;
	struct acpi_namespace_node *gpe_device;
	struct acpi_gpe_register_info *gpe_register_info;
	struct acpi_gpe_event_info *gpe_event_info;
	u32 gpe_number;
	u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
	acpi_cpu_flags flags;
	u32 i;
	u32 j;

	ACPI_FUNCTION_NAME(ev_gpe_detect);

	/* Check for the case where there are no GPEs */

	if (!gpe_xrupt_list) {
		return (int_status);
	}

	/*
	 * We need to obtain the GPE lock for both the data structs and the
	 * registers. The lock is dropped and reacquired around the per-GPE
	 * dispatch below, since acpi_ev_detect_gpe() acquires it itself.
	 */
	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

	/* Examine all GPE blocks attached to this interrupt level */

	gpe_block = gpe_xrupt_list->gpe_block_list_head;
	while (gpe_block) {
		gpe_device = gpe_block->node;

		/*
		 * Walk all of the GPE status/enable register pairs in this
		 * GPE block, looking for active (enabled + status set) GPEs.
		 */
		for (i = 0; i < gpe_block->register_count; i++) {

			/* Get the next status/enable pair */

			gpe_register_info = &gpe_block->register_info[i];

			/*
			 * Optimization: If there are no GPEs enabled (for run
			 * or wake) within this register, we can safely ignore
			 * the entire register.
			 */
			if (!(gpe_register_info->enable_for_run |
			      gpe_register_info->enable_for_wake)) {
				ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
						  "Ignore disabled registers for GPE %02X-%02X: "
						  "RunEnable=%02X, WakeEnable=%02X\n",
						  gpe_register_info->
						  base_gpe_number,
						  gpe_register_info->
						  base_gpe_number +
						  (ACPI_GPE_REGISTER_WIDTH - 1),
						  gpe_register_info->
						  enable_for_run,
						  gpe_register_info->
						  enable_for_wake));
				continue;
			}

			/* Examine each individual GPE bit in this register */

			for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {

				/* Detect and dispatch one GPE bit */

				gpe_event_info =
				    &gpe_block->
				    event_info[((acpi_size)i *
						ACPI_GPE_REGISTER_WIDTH) + j];
				gpe_number =
				    j + gpe_register_info->base_gpe_number;
				/*
				 * Drop the GPE lock across the dispatch;
				 * acpi_ev_detect_gpe() takes it internally.
				 */
				acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
				int_status |=
				    acpi_ev_detect_gpe(gpe_device,
						       gpe_event_info,
						       gpe_number);
				flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
			}
		}

		gpe_block = gpe_block->next;
	}

	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
	return (int_status);
}
0438
0439
0440
0441
0442
0443
0444
0445
0446
0447
0448
0449
0450
0451
0452
0453
0454
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_asynch_execute_gpe_method
 *
 * PARAMETERS:  context (gpe_event_info) - Info for this GPE
 *
 * RETURN:      None
 *
 * DESCRIPTION: Perform the actual (deferred) execution of a GPE method
 *              (_Lxx/_Exx) or an implicit notify. Executed from a work
 *              queue, not at interrupt level. When done, the GPE is
 *              re-enabled via acpi_ev_asynch_enable_gpe().
 *
 ******************************************************************************/

static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
{
	struct acpi_gpe_event_info *gpe_event_info = context;
	acpi_status status = AE_OK;
	struct acpi_evaluate_info *info;
	struct acpi_gpe_notify_info *notify;

	ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);

	/* Do the correct dispatch - notify list or control method */

	switch (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags)) {
	case ACPI_GPE_DISPATCH_NOTIFY:
		/*
		 * Implicit notify: queue ACPI_NOTIFY_DEVICE_WAKE for every
		 * device node on this GPE's notify list. Stops at the first
		 * queueing failure.
		 */
		notify = gpe_event_info->dispatch.notify_list;
		while (ACPI_SUCCESS(status) && notify) {
			status =
			    acpi_ev_queue_notify_request(notify->device_node,
							 ACPI_NOTIFY_DEVICE_WAKE);

			notify = notify->next;
		}

		break;

	case ACPI_GPE_DISPATCH_METHOD:

		/* Allocate the evaluation information block */

		info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
		if (!info) {
			status = AE_NO_MEMORY;
		} else {
			/*
			 * Evaluate the GPE method (_Lxx/_Exx) for this GPE,
			 * discarding any return value.
			 */
			info->prefix_node =
			    gpe_event_info->dispatch.method_node;
			info->flags = ACPI_IGNORE_RETURN_VALUE;

			status = acpi_ns_evaluate(info);
			ACPI_FREE(info);
		}

		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"while evaluating GPE method [%4.4s]",
					acpi_ut_get_node_name(gpe_event_info->
							      dispatch.
							      method_node)));
		}
		break;

	default:

		goto error_exit;	/* Unknown dispatch type - just re-enable */
	}

	/* Defer the GPE re-enable until queued notify handlers are done */

	status = acpi_os_execute(OSL_NOTIFY_HANDLER,
				 acpi_ev_asynch_enable_gpe, gpe_event_info);
	if (ACPI_SUCCESS(status)) {
		return_VOID;
	}

error_exit:
	/* Could not defer - re-enable the GPE synchronously instead */
	acpi_ev_asynch_enable_gpe(gpe_event_info);
	return_VOID;
}
0536
0537
0538
0539
0540
0541
0542
0543
0544
0545
0546
0547
0548
0549
0550
0551
0552 static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context)
0553 {
0554 struct acpi_gpe_event_info *gpe_event_info = context;
0555 acpi_cpu_flags flags;
0556
0557 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
0558 (void)acpi_ev_finish_gpe(gpe_event_info);
0559 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
0560
0561 return;
0562 }
0563
0564
0565
0566
0567
0568
0569
0570
0571
0572
0573
0574
0575
0576
0577
0578 acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info)
0579 {
0580 acpi_status status;
0581
0582 if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
0583 ACPI_GPE_LEVEL_TRIGGERED) {
0584
0585
0586
0587
0588 status = acpi_hw_clear_gpe(gpe_event_info);
0589 if (ACPI_FAILURE(status)) {
0590 return (status);
0591 }
0592 }
0593
0594
0595
0596
0597
0598
0599 (void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_CONDITIONAL_ENABLE);
0600 gpe_event_info->disable_for_dispatch = FALSE;
0601 return (AE_OK);
0602 }
0603
0604
0605
0606
0607
0608
0609
0610
0611
0612
0613
0614
0615
0616
0617
0618
0619
0620
0621
0622
0623
0624
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_detect_gpe
 *
 * PARAMETERS:  gpe_device          - Device node. NULL for FADT GPE blocks.
 *              gpe_event_info      - Info for this GPE. May be NULL, in
 *                                    which case it is looked up from
 *                                    gpe_device/gpe_number.
 *              gpe_number          - Raw GPE number
 *
 * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
 *
 * DESCRIPTION: Detect and dispatch a single General Purpose Event: reads the
 *              GPE's enable and status registers, and if the GPE is both
 *              enabled and active, dispatches it to a raw handler (lock
 *              dropped around the call) or via acpi_ev_gpe_dispatch().
 *
 ******************************************************************************/

u32
acpi_ev_detect_gpe(struct acpi_namespace_node *gpe_device,
		   struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
{
	u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
	u8 enabled_status_byte;
	u64 status_reg;
	u64 enable_reg;
	u32 register_bit;
	struct acpi_gpe_register_info *gpe_register_info;
	struct acpi_gpe_handler_info *gpe_handler_info;
	acpi_cpu_flags flags;
	acpi_status status;

	ACPI_FUNCTION_TRACE(ev_gpe_detect);

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

	/* Look up the event info if the caller did not supply it */

	if (!gpe_event_info) {
		gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
		if (!gpe_event_info)
			goto error_exit;
	}

	/* Get the info block for the entire GPE register */

	gpe_register_info = gpe_event_info->register_info;

	/* Get the register bitmask for this GPE */

	register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);

	/* GPE currently enabled (enable bit == 1)? */

	status = acpi_hw_gpe_read(&enable_reg, &gpe_register_info->enable_address);
	if (ACPI_FAILURE(status)) {
		goto error_exit;
	}

	/* GPE currently active (status bit == 1)? */

	status = acpi_hw_gpe_read(&status_reg, &gpe_register_info->status_address);
	if (ACPI_FAILURE(status)) {
		goto error_exit;
	}

	/* Dump the raw register state for interrupt-level debugging */

	ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
			  "Read registers for GPE %02X: Status=%02X, Enable=%02X, "
			  "RunEnable=%02X, WakeEnable=%02X\n",
			  gpe_number,
			  (u32)(status_reg & register_bit),
			  (u32)(enable_reg & register_bit),
			  gpe_register_info->enable_for_run,
			  gpe_register_info->enable_for_wake));

	/* The GPE fired only if it is both enabled and has its status set */

	enabled_status_byte = (u8)(status_reg & enable_reg);
	if (!(enabled_status_byte & register_bit)) {
		goto error_exit;
	}

	/* Invoke the global event handler, if installed */

	acpi_gpe_count++;
	if (acpi_gbl_global_event_handler) {
		acpi_gbl_global_event_handler(ACPI_EVENT_TYPE_GPE,
					      gpe_device, gpe_number,
					      acpi_gbl_global_event_handler_context);
	}

	/* Found an active GPE */

	if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
	    ACPI_GPE_DISPATCH_RAW_HANDLER) {

		/* Dispatch the event to a raw handler */

		gpe_handler_info = gpe_event_info->dispatch.handler;

		/*
		 * The GPE lock is dropped around the raw handler call. There
		 * is no protection around the namespace node and the handler
		 * pointer here; presumably the node persists after table load
		 * and handlers are flushed before removal — NOTE(review):
		 * confirm against acpi_os_wait_events_complete() usage.
		 */
		acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
		int_status |=
		    gpe_handler_info->address(gpe_device, gpe_number,
					      gpe_handler_info->context);
		flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
	} else {
		/* Dispatch the event to a standard handler or method */

		int_status |= acpi_ev_gpe_dispatch(gpe_device,
						   gpe_event_info, gpe_number);
	}

error_exit:
	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
	return (int_status);
}
0731
0732
0733
0734
0735
0736
0737
0738
0739
0740
0741
0742
0743
0744
0745
0746
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_gpe_dispatch
 *
 * PARAMETERS:  gpe_device          - Device node. NULL for FADT GPE blocks.
 *              gpe_event_info      - Info for this GPE
 *              gpe_number          - Number relative to the parent GPE block
 *
 * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
 *
 * DESCRIPTION: Dispatch a General Purpose Event to either an installed
 *              handler or a control method (_Lxx/_Exx), via deferred
 *              execution for methods/notifies.
 *
 ******************************************************************************/

u32
acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
		     struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
{
	acpi_status status;
	u32 return_value;

	ACPI_FUNCTION_TRACE(ev_gpe_dispatch);

	/*
	 * Always disable the GPE first so that it cannot keep firing while
	 * the handler or method below is running. If the GPE cannot even be
	 * disabled, give up on handling it.
	 */
	status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status,
				"Unable to disable GPE %02X", gpe_number));
		return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
	}

	/*
	 * For edge-triggered GPEs, clear the status bit up front.
	 * (Level-triggered GPEs are instead cleared after servicing, in
	 * acpi_ev_finish_gpe().)
	 */
	if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
	    ACPI_GPE_EDGE_TRIGGERED) {
		status = acpi_hw_clear_gpe(gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to clear GPE %02X",
					gpe_number));
			/* Undo the disable above before bailing out */
			(void)acpi_hw_low_set_gpe(gpe_event_info,
						  ACPI_GPE_CONDITIONAL_ENABLE);
			return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
		}
	}

	/* Record that this GPE is disabled pending dispatch completion */

	gpe_event_info->disable_for_dispatch = TRUE;

	/*
	 * Dispatch the GPE to either an installed handler or the control
	 * method associated with this GPE (_Lxx or _Exx). If a handler
	 * exists, it is invoked directly; otherwise method/notify execution
	 * is queued for deferred execution.
	 */
	switch (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags)) {
	case ACPI_GPE_DISPATCH_HANDLER:

		/* Invoke the installed handler (at interrupt level) */

		return_value =
		    gpe_event_info->dispatch.handler->address(gpe_device,
							      gpe_number,
							      gpe_event_info->
							      dispatch.handler->
							      context);

		/* If requested by the handler, finish (clear/re-enable) the GPE */

		if (return_value & ACPI_REENABLE_GPE) {
			(void)acpi_ev_finish_gpe(gpe_event_info);
		}
		break;

	case ACPI_GPE_DISPATCH_METHOD:
	case ACPI_GPE_DISPATCH_NOTIFY:
		/*
		 * Queue deferred execution of the GPE method or the implicit
		 * notify. On queueing failure the GPE stays disabled.
		 */
		status = acpi_os_execute(OSL_GPE_HANDLER,
					 acpi_ev_asynch_execute_gpe_method,
					 gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to queue handler for GPE %02X - event disabled",
					gpe_number));
		}
		break;

	default:
		/*
		 * No handler or method for this GPE - leave it disabled and
		 * report the error.
		 */
		ACPI_ERROR((AE_INFO,
			    "No handler or method for GPE %02X, disabling event",
			    gpe_number));

		break;
	}

	return_UINT32(ACPI_INTERRUPT_HANDLED);
}
0848
0849 #endif