// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
 *
 * Module Name: evgpe - General Purpose Event handling and dispatch
 *
 * Copyright (C) 2000 - 2022, Intel Corp.
 *
 *****************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"
#include "acnamesp.h"

#define _COMPONENT          ACPI_EVENTS
ACPI_MODULE_NAME("evgpe")
#if (!ACPI_REDUCED_HARDWARE)    /* Entire module */
/* Local prototypes */
static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context);

static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context);

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_update_gpe_enable_mask
 *
 * PARAMETERS:  gpe_event_info          - GPE to update
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Updates GPE register enable mask based upon whether there are
 *              runtime references to this GPE
 *
 ******************************************************************************/

acpi_status
acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info)
{
    struct acpi_gpe_register_info *gpe_register_info;
    u32 register_bit;

    ACPI_FUNCTION_TRACE(ev_update_gpe_enable_mask);

    gpe_register_info = gpe_event_info->register_info;
    if (!gpe_register_info) {
        return_ACPI_STATUS(AE_NOT_EXIST);
    }

    register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);

    /* Clear the run bit up front */

    ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit);

    /* Set the mask bit only if there are references to this GPE */

    if (gpe_event_info->runtime_count) {
        ACPI_SET_BIT(gpe_register_info->enable_for_run,
                 (u8)register_bit);
    }

    gpe_register_info->enable_mask = gpe_register_info->enable_for_run;
    return_ACPI_STATUS(AE_OK);
}
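
/*
 * Worked example (illustrative, not part of the ACPICA sources): register_bit
 * is the single-bit mask for this GPE within its 8-bit register. Assuming
 * acpi_hw_get_gpe_register_bit() computes
 *
 *     register_bit = 1 << (gpe_number - base_gpe_number);
 *
 * a hypothetical GPE 0x13 in a register block with base_gpe_number 0x10
 * yields register_bit = 1 << 3 = 0x08, so ACPI_SET_BIT() above sets bit 3
 * of enable_for_run for that register.
 */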

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_enable_gpe
 *
 * PARAMETERS:  gpe_event_info          - GPE to enable
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Enable a GPE.
 *
 ******************************************************************************/

acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
    acpi_status status;

    ACPI_FUNCTION_TRACE(ev_enable_gpe);

    /* Enable the requested GPE */

    status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
    return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_mask_gpe
 *
 * PARAMETERS:  gpe_event_info          - GPE to be blocked/unblocked
 *              is_masked               - Whether the GPE is masked or not
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Unconditionally mask/unmask a GPE during runtime.
 *
 ******************************************************************************/

acpi_status
acpi_ev_mask_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 is_masked)
{
    struct acpi_gpe_register_info *gpe_register_info;
    u32 register_bit;

    ACPI_FUNCTION_TRACE(ev_mask_gpe);

    gpe_register_info = gpe_event_info->register_info;
    if (!gpe_register_info) {
        return_ACPI_STATUS(AE_NOT_EXIST);
    }

    register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);

    /* Perform the action */

    if (is_masked) {
        if (register_bit & gpe_register_info->mask_for_run) {
            return_ACPI_STATUS(AE_BAD_PARAMETER);
        }

        (void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
        ACPI_SET_BIT(gpe_register_info->mask_for_run, (u8)register_bit);
    } else {
        if (!(register_bit & gpe_register_info->mask_for_run)) {
            return_ACPI_STATUS(AE_BAD_PARAMETER);
        }

        ACPI_CLEAR_BIT(gpe_register_info->mask_for_run,
                   (u8)register_bit);
        if (gpe_event_info->runtime_count
            && !gpe_event_info->disable_for_dispatch) {
            (void)acpi_hw_low_set_gpe(gpe_event_info,
                          ACPI_GPE_ENABLE);
        }
    }

    return_ACPI_STATUS(AE_OK);
}
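
/*
 * Usage sketch (illustrative, not part of the ACPICA sources): this masking
 * backend is typically reached through the public acpi_mask_gpe() interface.
 * A caller wanting to temporarily silence a hypothetical GPE 0x18 in the
 * FADT-defined block might do:
 *
 *     acpi_status status;
 *
 *     status = acpi_mask_gpe(NULL, 0x18, TRUE);       // mask (block) the GPE
 *     if (ACPI_SUCCESS(status)) {
 *         // ... quiesce the device ...
 *         status = acpi_mask_gpe(NULL, 0x18, FALSE);  // unmask it again
 *     }
 */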

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_add_gpe_reference
 *
 * PARAMETERS:  gpe_event_info          - Add a reference to this GPE
 *              clear_on_enable         - Clear GPE status before enabling it
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Add a reference to a GPE. On the first reference, the GPE is
 *              hardware-enabled.
 *
 ******************************************************************************/

acpi_status
acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info,
              u8 clear_on_enable)
{
    acpi_status status = AE_OK;

    ACPI_FUNCTION_TRACE(ev_add_gpe_reference);

    if (gpe_event_info->runtime_count == ACPI_UINT8_MAX) {
        return_ACPI_STATUS(AE_LIMIT);
    }

    gpe_event_info->runtime_count++;
    if (gpe_event_info->runtime_count == 1) {

        /* Enable on first reference */

        if (clear_on_enable) {
            (void)acpi_hw_clear_gpe(gpe_event_info);
        }

        status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
        if (ACPI_SUCCESS(status)) {
            status = acpi_ev_enable_gpe(gpe_event_info);
        }

        if (ACPI_FAILURE(status)) {
            gpe_event_info->runtime_count--;
        }
    }

    return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_remove_gpe_reference
 *
 * PARAMETERS:  gpe_event_info          - Remove a reference to this GPE
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Remove a reference to a GPE. When the last reference is
 *              removed, the GPE is hardware-disabled.
 *
 ******************************************************************************/

acpi_status
acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
{
    acpi_status status = AE_OK;

    ACPI_FUNCTION_TRACE(ev_remove_gpe_reference);

    if (!gpe_event_info->runtime_count) {
        return_ACPI_STATUS(AE_LIMIT);
    }

    gpe_event_info->runtime_count--;
    if (!gpe_event_info->runtime_count) {

        /* Disable on last reference */

        status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
        if (ACPI_SUCCESS(status)) {
            status =
                acpi_hw_low_set_gpe(gpe_event_info,
                        ACPI_GPE_DISABLE);
        }

        if (ACPI_FAILURE(status)) {
            gpe_event_info->runtime_count++;
        }
    }

    return_ACPI_STATUS(status);
}
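
/*
 * Usage sketch (illustrative, not part of the ACPICA sources): the add/remove
 * reference calls are expected to be balanced, and made while the GPE data
 * structures are protected by acpi_gbl_gpe_lock, e.g.:
 *
 *     acpi_cpu_flags flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
 *     struct acpi_gpe_event_info *gpe_event_info =
 *         acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
 *
 *     if (gpe_event_info) {
 *         status = acpi_ev_add_gpe_reference(gpe_event_info, TRUE);
 *         // ... later, to drop the reference:
 *         // status = acpi_ev_remove_gpe_reference(gpe_event_info);
 *     }
 *     acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
 */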

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_low_get_gpe_info
 *
 * PARAMETERS:  gpe_number          - Raw GPE number
 *              gpe_block           - A GPE info block
 *
 * RETURN:      A GPE event_info struct. NULL if not a valid GPE (The gpe_number
 *              is not within the specified GPE block)
 *
 * DESCRIPTION: Returns the event_info struct associated with this GPE. This is
 *              the low-level implementation of ev_get_gpe_event_info.
 *
 ******************************************************************************/

struct acpi_gpe_event_info *acpi_ev_low_get_gpe_info(u32 gpe_number,
                             struct acpi_gpe_block_info
                             *gpe_block)
{
    u32 gpe_index;

    /*
     * Validate that the gpe_number is within the specified gpe_block.
     * (Two steps)
     */
    if (!gpe_block || (gpe_number < gpe_block->block_base_number)) {
        return (NULL);
    }

    gpe_index = gpe_number - gpe_block->block_base_number;
    if (gpe_index >= gpe_block->gpe_count) {
        return (NULL);
    }

    return (&gpe_block->event_info[gpe_index]);
}

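/*
 * Worked example (illustrative, not part of the ACPICA sources): for a
 * hypothetical GPE block with block_base_number 0x10 and gpe_count 32, a raw
 * gpe_number of 0x13 passes both checks above and maps to
 * event_info[0x13 - 0x10], i.e. index 3. A gpe_number of 0x0F (below the
 * block base) or 0x35 (index 0x25 >= gpe_count) returns NULL.
 */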

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_get_gpe_event_info
 *
 * PARAMETERS:  gpe_device          - Device node. NULL for GPE0/GPE1
 *              gpe_number          - Raw GPE number
 *
 * RETURN:      A GPE event_info struct. NULL if not a valid GPE
 *
 * DESCRIPTION: Returns the event_info struct associated with this GPE.
 *              Validates the gpe_block and the gpe_number
 *
 *              Should be called only when the GPE lists are semaphore locked
 *              and not subject to change.
 *
 ******************************************************************************/

struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
                               u32 gpe_number)
{
    union acpi_operand_object *obj_desc;
    struct acpi_gpe_event_info *gpe_info;
    u32 i;

    ACPI_FUNCTION_ENTRY();

    /* A NULL gpe_device means use the FADT-defined GPE block(s) */

    if (!gpe_device) {

        /* Examine GPE Block 0 and 1 (These blocks are permanent) */

        for (i = 0; i < ACPI_MAX_GPE_BLOCKS; i++) {
            gpe_info = acpi_ev_low_get_gpe_info(gpe_number,
                                acpi_gbl_gpe_fadt_blocks
                                [i]);
            if (gpe_info) {
                return (gpe_info);
            }
        }

        /* The gpe_number was not in the range of either FADT GPE block */

        return (NULL);
    }

    /* A Non-NULL gpe_device means this is a GPE Block Device */

    obj_desc =
        acpi_ns_get_attached_object((struct acpi_namespace_node *)
                           gpe_device);
    if (!obj_desc || !obj_desc->device.gpe_block) {
        return (NULL);
    }

    return (acpi_ev_low_get_gpe_info
        (gpe_number, obj_desc->device.gpe_block));
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_gpe_detect
 *
 * PARAMETERS:  gpe_xrupt_list      - Interrupt block for this interrupt.
 *                                    Can have multiple GPE blocks attached.
 *
 * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
 *
 * DESCRIPTION: Detect if any GP events have occurred. This function is
 *              executed at interrupt level.
 *
 ******************************************************************************/

u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list)
{
    struct acpi_gpe_block_info *gpe_block;
    struct acpi_namespace_node *gpe_device;
    struct acpi_gpe_register_info *gpe_register_info;
    struct acpi_gpe_event_info *gpe_event_info;
    u32 gpe_number;
    u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
    acpi_cpu_flags flags;
    u32 i;
    u32 j;

    ACPI_FUNCTION_NAME(ev_gpe_detect);

    /* Check for the case where there are no GPEs */

    if (!gpe_xrupt_list) {
        return (int_status);
    }

    /*
     * We need to obtain the GPE lock for both the data structs and registers
     * Note: Not necessary to obtain the hardware lock, since the GPE
     * registers are owned by the gpe_lock.
     */
    flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

    /* Examine all GPE blocks attached to this interrupt level */

    gpe_block = gpe_xrupt_list->gpe_block_list_head;
    while (gpe_block) {
        gpe_device = gpe_block->node;

        /*
         * Read all of the 8-bit GPE status and enable registers in this GPE
         * block, saving all of them. Find all currently active GP events.
         */
        for (i = 0; i < gpe_block->register_count; i++) {

            /* Get the next status/enable pair */

            gpe_register_info = &gpe_block->register_info[i];

            /*
             * Optimization: If there are no GPEs enabled within this
             * register, we can safely ignore the entire register.
             */
            if (!(gpe_register_info->enable_for_run |
                  gpe_register_info->enable_for_wake)) {
                ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
                          "Ignore disabled registers for GPE %02X-%02X: "
                          "RunEnable=%02X, WakeEnable=%02X\n",
                          gpe_register_info->
                          base_gpe_number,
                          gpe_register_info->
                          base_gpe_number +
                          (ACPI_GPE_REGISTER_WIDTH - 1),
                          gpe_register_info->
                          enable_for_run,
                          gpe_register_info->
                          enable_for_wake));
                continue;
            }

            /* Now look at the individual GPEs in this byte register */

            for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {

                /* Detect and dispatch one GPE bit */

                gpe_event_info =
                    &gpe_block->
                    event_info[((acpi_size)i *
                        ACPI_GPE_REGISTER_WIDTH) + j];
                gpe_number =
                    j + gpe_register_info->base_gpe_number;
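                /*
                 * acpi_ev_detect_gpe() acquires acpi_gbl_gpe_lock itself,
                 * so the lock must be released here and reacquired after
                 * the call returns.
                 */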
                acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
                int_status |=
                    acpi_ev_detect_gpe(gpe_device,
                               gpe_event_info,
                               gpe_number);
                flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
            }
        }

        gpe_block = gpe_block->next;
    }

    acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
    return (int_status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_asynch_execute_gpe_method
 *
 * PARAMETERS:  Context (gpe_event_info) - Info for this GPE
 *
 * RETURN:      None
 *
 * DESCRIPTION: Perform the actual execution of a GPE control method. This
 *              function is called from an invocation of acpi_os_execute and
 *              therefore does NOT execute at interrupt level - so that
 *              the control method itself is not executed in the context of
 *              an interrupt handler.
 *
 ******************************************************************************/

static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
{
    struct acpi_gpe_event_info *gpe_event_info = context;
    acpi_status status = AE_OK;
    struct acpi_evaluate_info *info;
    struct acpi_gpe_notify_info *notify;

    ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);

    /* Do the correct dispatch - normal method or implicit notify */

    switch (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags)) {
    case ACPI_GPE_DISPATCH_NOTIFY:
        /*
         * Implicit notify.
         * Dispatch a DEVICE_WAKE notify to the appropriate handler.
         * NOTE: the request is queued for execution after this method
         * completes. The notify handlers are NOT invoked synchronously
         * from this thread -- because handlers may in turn run other
         * control methods.
         *
         * June 2012: Expand implicit notify mechanism to support
         * notifies on multiple device objects.
         */
        notify = gpe_event_info->dispatch.notify_list;
        while (ACPI_SUCCESS(status) && notify) {
            status =
                acpi_ev_queue_notify_request(notify->device_node,
                             ACPI_NOTIFY_DEVICE_WAKE);

            notify = notify->next;
        }

        break;

    case ACPI_GPE_DISPATCH_METHOD:

        /* Allocate the evaluation information block */

        info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
        if (!info) {
            status = AE_NO_MEMORY;
        } else {
            /*
             * Invoke the GPE Method (_Lxx, _Exx) i.e., evaluate the
             * _Lxx/_Exx control method that corresponds to this GPE
             */
            info->prefix_node =
                gpe_event_info->dispatch.method_node;
            info->flags = ACPI_IGNORE_RETURN_VALUE;

            status = acpi_ns_evaluate(info);
            ACPI_FREE(info);
        }

        if (ACPI_FAILURE(status)) {
            ACPI_EXCEPTION((AE_INFO, status,
                    "while evaluating GPE method [%4.4s]",
                    acpi_ut_get_node_name(gpe_event_info->
                                  dispatch.
                                  method_node)));
        }
        break;

    default:

        goto error_exit;    /* Should never happen */
    }

    /* Defer enabling of GPE until all notify handlers are done */

    status = acpi_os_execute(OSL_NOTIFY_HANDLER,
                 acpi_ev_asynch_enable_gpe, gpe_event_info);
    if (ACPI_SUCCESS(status)) {
        return_VOID;
    }

error_exit:
    acpi_ev_asynch_enable_gpe(gpe_event_info);
    return_VOID;
}


/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_asynch_enable_gpe
 *
 * PARAMETERS:  Context (gpe_event_info) - Info for this GPE
 *              Callback from acpi_os_execute
 *
 * RETURN:      None
 *
 * DESCRIPTION: Asynchronous clear/enable for GPE. This allows the GPE to
 *              complete (i.e., finish execution of Notify)
 *
 ******************************************************************************/

static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context)
{
    struct acpi_gpe_event_info *gpe_event_info = context;
    acpi_cpu_flags flags;

    flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
    (void)acpi_ev_finish_gpe(gpe_event_info);
    acpi_os_release_lock(acpi_gbl_gpe_lock, flags);

    return;
}


/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_finish_gpe
 *
 * PARAMETERS:  gpe_event_info      - Info for this GPE
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Clear/Enable a GPE. Common code that is used after execution
 *              of a GPE method or a synchronous or asynchronous GPE handler.
 *
 ******************************************************************************/

acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
    acpi_status status;

    if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
        ACPI_GPE_LEVEL_TRIGGERED) {
        /*
         * GPE is level-triggered, we clear the GPE status bit after
         * handling the event.
         */
        status = acpi_hw_clear_gpe(gpe_event_info);
        if (ACPI_FAILURE(status)) {
            return (status);
        }
    }

    /*
     * Enable this GPE, conditionally. This means that the GPE will
     * only be physically enabled if the enable_mask bit is set
     * in the event_info.
     */
    (void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_CONDITIONAL_ENABLE);
    gpe_event_info->disable_for_dispatch = FALSE;
    return (AE_OK);
}


/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_detect_gpe
 *
 * PARAMETERS:  gpe_device          - Device node. NULL for GPE0/GPE1
 *              gpe_event_info      - Info for this GPE
 *              gpe_number          - Number relative to the parent GPE block
 *
 * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
 *
 * DESCRIPTION: Detect and dispatch a General Purpose Event to either a function
 *              (e.g. EC) or method (e.g. _Lxx/_Exx) handler.
 * NOTE:        GPE is W1C, so it is possible to handle a single GPE from both
 *              task and irq context in parallel as long as the process to
 *              detect and mask the GPE is atomic.
 *              However the atomicity of ACPI_GPE_DISPATCH_RAW_HANDLER is
 *              dependent on the raw handler itself.
 *
 ******************************************************************************/

u32
acpi_ev_detect_gpe(struct acpi_namespace_node *gpe_device,
           struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
{
    u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
    u8 enabled_status_byte;
    u64 status_reg;
    u64 enable_reg;
    u32 register_bit;
    struct acpi_gpe_register_info *gpe_register_info;
    struct acpi_gpe_handler_info *gpe_handler_info;
    acpi_cpu_flags flags;
    acpi_status status;

    ACPI_FUNCTION_TRACE(ev_gpe_detect);

    flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

    if (!gpe_event_info) {
        gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
        if (!gpe_event_info)
            goto error_exit;
    }

    /* Get the info block for the entire GPE register */

    gpe_register_info = gpe_event_info->register_info;

    /* Get the register bitmask for this GPE */

    register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);

    /* GPE currently enabled (enable bit == 1)? */

    status = acpi_hw_gpe_read(&enable_reg, &gpe_register_info->enable_address);
    if (ACPI_FAILURE(status)) {
        goto error_exit;
    }

    /* GPE currently active (status bit == 1)? */

    status = acpi_hw_gpe_read(&status_reg, &gpe_register_info->status_address);
    if (ACPI_FAILURE(status)) {
        goto error_exit;
    }

    /* Check if there is anything active at all in this GPE */

    ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
              "Read registers for GPE %02X: Status=%02X, Enable=%02X, "
              "RunEnable=%02X, WakeEnable=%02X\n",
              gpe_number,
              (u32)(status_reg & register_bit),
              (u32)(enable_reg & register_bit),
              gpe_register_info->enable_for_run,
              gpe_register_info->enable_for_wake));

    enabled_status_byte = (u8)(status_reg & enable_reg);
    if (!(enabled_status_byte & register_bit)) {
        goto error_exit;
    }

    /* Invoke global event handler if present */

    acpi_gpe_count++;
    if (acpi_gbl_global_event_handler) {
        acpi_gbl_global_event_handler(ACPI_EVENT_TYPE_GPE,
                          gpe_device, gpe_number,
                          acpi_gbl_global_event_handler_context);
    }

    /* Found an active GPE */

    if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
        ACPI_GPE_DISPATCH_RAW_HANDLER) {

        /* Dispatch the event to a raw handler */

        gpe_handler_info = gpe_event_info->dispatch.handler;

        /*
         * There is no protection around the namespace node
         * and the GPE handler to ensure a safe destruction
         * because:
         * 1. The namespace node is expected to always
         *    exist after loading a table.
         * 2. The GPE handler is expected to be flushed by
         *    acpi_os_wait_events_complete() before the
         *    destruction.
         */
        acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
        int_status |=
            gpe_handler_info->address(gpe_device, gpe_number,
                          gpe_handler_info->context);
        flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
    } else {
        /* Dispatch the event to a standard handler or method. */

        int_status |= acpi_ev_gpe_dispatch(gpe_device,
                           gpe_event_info, gpe_number);
    }

error_exit:
    acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
    return (int_status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_gpe_dispatch
 *
 * PARAMETERS:  gpe_device          - Device node. NULL for GPE0/GPE1
 *              gpe_event_info      - Info for this GPE
 *              gpe_number          - Number relative to the parent GPE block
 *
 * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
 *
 * DESCRIPTION: Dispatch a General Purpose Event to either a function (e.g. EC)
 *              or method (e.g. _Lxx/_Exx) handler.
 *
 ******************************************************************************/

u32
acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
             struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
{
    acpi_status status;
    u32 return_value;

    ACPI_FUNCTION_TRACE(ev_gpe_dispatch);

    /*
     * Always disable the GPE so that it does not keep firing before
     * any asynchronous activity completes (either from the execution
     * of a GPE method or an asynchronous GPE handler.)
     *
     * If there is no handler or method to run, just disable the
     * GPE and leave it disabled permanently to prevent further such
     * pointless events from firing.
     */
    status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
    if (ACPI_FAILURE(status)) {
        ACPI_EXCEPTION((AE_INFO, status,
                "Unable to disable GPE %02X", gpe_number));
        return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
    }

    /*
     * If edge-triggered, clear the GPE status bit now. Note that
     * level-triggered events are cleared after the GPE is serviced.
     */
    if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
        ACPI_GPE_EDGE_TRIGGERED) {
        status = acpi_hw_clear_gpe(gpe_event_info);
        if (ACPI_FAILURE(status)) {
            ACPI_EXCEPTION((AE_INFO, status,
                    "Unable to clear GPE %02X",
                    gpe_number));
            (void)acpi_hw_low_set_gpe(gpe_event_info,
                          ACPI_GPE_CONDITIONAL_ENABLE);
            return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
        }
    }

    gpe_event_info->disable_for_dispatch = TRUE;

    /*
     * Dispatch the GPE to either an installed handler or the control
     * method associated with this GPE (_Lxx or _Exx). If a handler
     * exists, we invoke it and do not attempt to run the method.
     * If there is neither a handler nor a method, leave the GPE
     * disabled.
     */
    switch (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags)) {
    case ACPI_GPE_DISPATCH_HANDLER:

        /* Invoke the installed handler (at interrupt level) */

        return_value =
            gpe_event_info->dispatch.handler->address(gpe_device,
                                  gpe_number,
                                  gpe_event_info->
                                  dispatch.handler->
                                  context);

        /* If requested, clear (if level-triggered) and re-enable the GPE */

        if (return_value & ACPI_REENABLE_GPE) {
            (void)acpi_ev_finish_gpe(gpe_event_info);
        }
        break;

    case ACPI_GPE_DISPATCH_METHOD:
    case ACPI_GPE_DISPATCH_NOTIFY:
        /*
         * Execute the method associated with the GPE
         * NOTE: Level-triggered GPEs are cleared after the method completes.
         */
        status = acpi_os_execute(OSL_GPE_HANDLER,
                     acpi_ev_asynch_execute_gpe_method,
                     gpe_event_info);
        if (ACPI_FAILURE(status)) {
            ACPI_EXCEPTION((AE_INFO, status,
                    "Unable to queue handler for GPE %02X - event disabled",
                    gpe_number));
        }
        break;

    default:
        /*
         * No handler or method to run!
         * 03/2010: This case should no longer be possible. We will not allow
         * a GPE to be enabled if it has no handler or method.
         */
        ACPI_ERROR((AE_INFO,
                "No handler or method for GPE %02X, disabling event",
                gpe_number));

        break;
    }

    return_UINT32(ACPI_INTERRUPT_HANDLED);
}

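/*
 * Usage sketch (illustrative, not part of the ACPICA sources): the
 * ACPI_GPE_DISPATCH_HANDLER case above invokes a handler previously
 * registered through the public acpi_install_gpe_handler() interface. A
 * minimal handler for a hypothetical GPE 0x18 that asks the core to clear
 * and re-enable the event might look like:
 *
 *     static u32 my_gpe_handler(acpi_handle gpe_device, u32 gpe_number,
 *                               void *context)
 *     {
 *         // ... service the device that raised the event ...
 *         return (ACPI_REENABLE_GPE);  // acpi_ev_finish_gpe() is then run
 *     }
 *
 *     status = acpi_install_gpe_handler(NULL, 0x18, ACPI_GPE_LEVEL_TRIGGERED,
 *                                       my_gpe_handler, NULL);
 *     status = acpi_enable_gpe(NULL, 0x18);
 */
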
#endif              /* !ACPI_REDUCED_HARDWARE */