// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
//
// Authors: Cezary Rojewski <cezary.rojewski@intel.com>
//          Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
//

#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/slab.h>
#include <sound/hdaudio_ext.h>
#include "avs.h"
#include "messages.h"
#include "registers.h"
#include "trace.h"

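/* Default IPC response timeout and idle delay before requesting D0ix, in ms. */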
#define AVS_IPC_TIMEOUT_MS  300
#define AVS_D0IX_DELAY_MS   300

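/*
 * Transition the DSP into or out of D0ix and track the resulting state.
 * Called from the delayed D0ix work as well as from the wake/disable paths.
 */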
static int
avs_dsp_set_d0ix(struct avs_dev *adev, bool enable)
{
    struct avs_ipc *ipc = adev->ipc;
    int ret;

    /* Is transition required? */
    if (ipc->in_d0ix == enable)
        return 0;

    ret = avs_dsp_op(adev, set_d0ix, enable);
    if (ret) {
        /* Prevent further D0ix attempts on deliberate IPC failure. */
        if (ret == -AVS_EIPC)
            atomic_inc(&ipc->d0ix_disable_depth);

        ipc->in_d0ix = false;
        return ret;
    }

    ipc->in_d0ix = enable;
    return 0;
}

static void avs_dsp_schedule_d0ix(struct avs_dev *adev, struct avs_ipc_msg *tx)
{
    if (atomic_read(&adev->ipc->d0ix_disable_depth))
        return;

    mod_delayed_work(system_power_efficient_wq, &adev->ipc->d0ix_work,
             msecs_to_jiffies(AVS_D0IX_DELAY_MS));
}

static void avs_dsp_d0ix_work(struct work_struct *work)
{
    struct avs_ipc *ipc = container_of(work, struct avs_ipc, d0ix_work.work);

    avs_dsp_set_d0ix(to_avs_dev(ipc->dev), true);
}

static int avs_dsp_wake_d0i0(struct avs_dev *adev, struct avs_ipc_msg *tx)
{
    struct avs_ipc *ipc = adev->ipc;

    if (!atomic_read(&ipc->d0ix_disable_depth)) {
        cancel_delayed_work_sync(&ipc->d0ix_work);
        return avs_dsp_set_d0ix(adev, false);
    }

    return 0;
}

int avs_dsp_disable_d0ix(struct avs_dev *adev)
{
    struct avs_ipc *ipc = adev->ipc;

    /* Prevent PG only on the first disable. */
    if (atomic_add_return(1, &ipc->d0ix_disable_depth) == 1) {
        cancel_delayed_work_sync(&ipc->d0ix_work);
        return avs_dsp_set_d0ix(adev, false);
    }

    return 0;
}

int avs_dsp_enable_d0ix(struct avs_dev *adev)
{
    struct avs_ipc *ipc = adev->ipc;

    if (atomic_dec_and_test(&ipc->d0ix_disable_depth))
        queue_delayed_work(system_power_efficient_wq, &ipc->d0ix_work,
                   msecs_to_jiffies(AVS_D0IX_DELAY_MS));
    return 0;
}

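/*
 * Illustrative usage, not part of this file: a caller that must keep the
 * DSP in D0 for a sequence of IPCs brackets it with the depth-counted pair:
 *
 *    avs_dsp_disable_d0ix(adev);
 *    ret = avs_dsp_send_msg(adev, &request, &reply);
 *    avs_dsp_enable_d0ix(adev);
 *
 * Nesting is safe; only the final enable re-arms the delayed D0ix work.
 */
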
static void avs_dsp_recovery(struct avs_dev *adev)
{
    struct avs_soc_component *acomp;
    unsigned int core_mask;
    int ret;

    mutex_lock(&adev->comp_list_mutex);
    /* disconnect all running streams */
    list_for_each_entry(acomp, &adev->comp_list, node) {
        struct snd_soc_pcm_runtime *rtd;
        struct snd_soc_card *card;

        card = acomp->base.card;
        if (!card)
            continue;

        for_each_card_rtds(card, rtd) {
            struct snd_pcm *pcm;
            int dir;

            pcm = rtd->pcm;
            if (!pcm || rtd->dai_link->no_pcm)
                continue;

            for_each_pcm_streams(dir) {
                struct snd_pcm_substream *substream;

                substream = pcm->streams[dir].substream;
                if (!substream || !substream->runtime)
                    continue;

                snd_pcm_stop(substream, SNDRV_PCM_STATE_DISCONNECTED);
            }
        }
    }
    mutex_unlock(&adev->comp_list_mutex);

    /* forcibly shut down all cores */
    core_mask = GENMASK(adev->hw_cfg.dsp_cores - 1, 0);
    avs_dsp_core_disable(adev, core_mask);

    /* attempt DSP reboot */
    ret = avs_dsp_boot_firmware(adev, true);
    if (ret < 0)
        dev_err(adev->dev, "dsp reboot failed: %d\n", ret);

    pm_runtime_mark_last_busy(adev->dev);
    pm_runtime_enable(adev->dev);
    pm_request_autosuspend(adev->dev);

    atomic_set(&adev->ipc->recovering, 0);
}

static void avs_dsp_recovery_work(struct work_struct *work)
{
    struct avs_ipc *ipc = container_of(work, struct avs_ipc, recovery_work);

    avs_dsp_recovery(to_avs_dev(ipc->dev));
}

static void avs_dsp_exception_caught(struct avs_dev *adev, union avs_notify_msg *msg)
{
    struct avs_ipc *ipc = adev->ipc;

    /* Account for the double-exception case. */
    ipc->ready = false;

    if (!atomic_add_unless(&ipc->recovering, 1, 1)) {
        dev_err(adev->dev, "dsp recovery is already in progress\n");
        return;
    }

    dev_crit(adev->dev, "communication severed, rebooting dsp...\n");

    cancel_delayed_work_sync(&ipc->d0ix_work);
    ipc->in_d0ix = false;
    /* Re-enabled on recovery completion. */
    pm_runtime_disable(adev->dev);

    /* Process received notification. */
    avs_dsp_op(adev, coredump, msg);

    schedule_work(&ipc->recovery_work);
}

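/* Called with ipc->rx_lock held; copies the reply into the RX buffer. */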
static void avs_dsp_receive_rx(struct avs_dev *adev, u64 header)
{
    struct avs_ipc *ipc = adev->ipc;
    union avs_reply_msg msg = AVS_MSG(header);
    u64 reg;

    reg = readq(avs_sram_addr(adev, AVS_FW_REGS_WINDOW));
    trace_avs_ipc_reply_msg(header, reg);

    ipc->rx.header = header;
    /* Abort copying payload if request processing was unsuccessful. */
    if (!msg.status) {
        /* update size in case of LARGE_CONFIG_GET */
        if (msg.msg_target == AVS_MOD_MSG &&
            msg.global_msg_type == AVS_MOD_LARGE_CONFIG_GET)
            ipc->rx.size = msg.ext.large_config.data_off_size;

        memcpy_fromio(ipc->rx.data, avs_uplink_addr(adev), ipc->rx.size);
        trace_avs_msg_payload(ipc->rx.data, ipc->rx.size);
    }
}

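/*
 * Notifications are handled in two passes: first the payload size is
 * derived from the message type and copied out of the uplink window, then
 * type-specific handling runs on the local copy.
 */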
static void avs_dsp_process_notification(struct avs_dev *adev, u64 header)
{
    struct avs_notify_mod_data mod_data;
    union avs_notify_msg msg = AVS_MSG(header);
    size_t data_size = 0;
    void *data = NULL;
    u64 reg;

    reg = readq(avs_sram_addr(adev, AVS_FW_REGS_WINDOW));
    trace_avs_ipc_notify_msg(header, reg);

    /* Ignore spurious notifications until handshake is established. */
    if (!adev->ipc->ready && msg.notify_msg_type != AVS_NOTIFY_FW_READY) {
        dev_dbg(adev->dev, "FW not ready, skip notification: 0x%08x\n", msg.primary);
        return;
    }

    /* Calculate notification payload size. */
    switch (msg.notify_msg_type) {
    case AVS_NOTIFY_FW_READY:
        break;

    case AVS_NOTIFY_PHRASE_DETECTED:
        data_size = sizeof(struct avs_notify_voice_data);
        break;

    case AVS_NOTIFY_RESOURCE_EVENT:
        data_size = sizeof(struct avs_notify_res_data);
        break;

    case AVS_NOTIFY_LOG_BUFFER_STATUS:
    case AVS_NOTIFY_EXCEPTION_CAUGHT:
        break;

    case AVS_NOTIFY_MODULE_EVENT:
        /* To know the total payload size, header needs to be read first. */
        memcpy_fromio(&mod_data, avs_uplink_addr(adev), sizeof(mod_data));
        data_size = sizeof(mod_data) + mod_data.data_size;
        break;

    default:
        dev_info(adev->dev, "unknown notification: 0x%08x\n", msg.primary);
        break;
    }

    if (data_size) {
        data = kmalloc(data_size, GFP_KERNEL);
        if (!data)
            return;

        memcpy_fromio(data, avs_uplink_addr(adev), data_size);
        trace_avs_msg_payload(data, data_size);
    }

    /* Perform notification-specific operations. */
    switch (msg.notify_msg_type) {
    case AVS_NOTIFY_FW_READY:
        dev_dbg(adev->dev, "FW READY 0x%08x\n", msg.primary);
        adev->ipc->ready = true;
        complete(&adev->fw_ready);
        break;

    case AVS_NOTIFY_LOG_BUFFER_STATUS:
        avs_dsp_op(adev, log_buffer_status, &msg);
        break;

    case AVS_NOTIFY_EXCEPTION_CAUGHT:
        avs_dsp_exception_caught(adev, &msg);
        break;

    default:
        break;
    }

    kfree(data);
}

void avs_dsp_process_response(struct avs_dev *adev, u64 header)
{
    struct avs_ipc *ipc = adev->ipc;

    /*
     * Response may either be solicited - a reply for a request that has
     * been sent beforehand - or unsolicited (notification).
     */
    if (avs_msg_is_reply(header)) {
        /* Response processing is invoked from IRQ thread. */
        spin_lock_irq(&ipc->rx_lock);
        avs_dsp_receive_rx(adev, header);
        ipc->rx_completed = true;
        spin_unlock_irq(&ipc->rx_lock);
    } else {
        avs_dsp_process_notification(adev, header);
    }

    complete(&ipc->busy_completion);
}

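/*
 * Top half of the IPC interrupt: acknowledge DSP's DONE reply to a host
 * request and, if the DSP raised BUSY with a new message, mask the BUSY
 * interrupt and wake the IRQ thread to process it.
 */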
irqreturn_t avs_dsp_irq_handler(int irq, void *dev_id)
{
    struct avs_dev *adev = dev_id;
    struct avs_ipc *ipc = adev->ipc;
    u32 adspis, hipc_rsp, hipc_ack;
    irqreturn_t ret = IRQ_NONE;

    adspis = snd_hdac_adsp_readl(adev, AVS_ADSP_REG_ADSPIS);
    if (adspis == UINT_MAX || !(adspis & AVS_ADSP_ADSPIS_IPC))
        return ret;

    hipc_ack = snd_hdac_adsp_readl(adev, SKL_ADSP_REG_HIPCIE);
    hipc_rsp = snd_hdac_adsp_readl(adev, SKL_ADSP_REG_HIPCT);

    /* DSP acked host's request */
    if (hipc_ack & SKL_ADSP_HIPCIE_DONE) {
        /*
         * As an extra precaution, mask the done interrupt. Code
         * executed as a result of the complete() below does not
         * assume any masking took place.
         */
        snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCCTL,
                      AVS_ADSP_HIPCCTL_DONE, 0);

        complete(&ipc->done_completion);

        /* tell DSP it has our attention */
        snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCIE,
                      SKL_ADSP_HIPCIE_DONE,
                      SKL_ADSP_HIPCIE_DONE);
        /* unmask done interrupt */
        snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCCTL,
                      AVS_ADSP_HIPCCTL_DONE,
                      AVS_ADSP_HIPCCTL_DONE);
        ret = IRQ_HANDLED;
    }

    /* DSP sent new response to process */
    if (hipc_rsp & SKL_ADSP_HIPCT_BUSY) {
        /* mask busy interrupt */
        snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCCTL,
                      AVS_ADSP_HIPCCTL_BUSY, 0);

        ret = IRQ_WAKE_THREAD;
    }

    return ret;
}

irqreturn_t avs_dsp_irq_thread(int irq, void *dev_id)
{
    struct avs_dev *adev = dev_id;
    union avs_reply_msg msg;
    u32 hipct, hipcte;

    hipct = snd_hdac_adsp_readl(adev, SKL_ADSP_REG_HIPCT);
    hipcte = snd_hdac_adsp_readl(adev, SKL_ADSP_REG_HIPCTE);

    /* ensure DSP sent new response to process */
    if (!(hipct & SKL_ADSP_HIPCT_BUSY))
        return IRQ_NONE;

    msg.primary = hipct;
    msg.ext.val = hipcte;
    avs_dsp_process_response(adev, msg.val);

    /* tell DSP we accepted its message */
    snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCT,
                  SKL_ADSP_HIPCT_BUSY, SKL_ADSP_HIPCT_BUSY);
    /* unmask busy interrupt */
    snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCCTL,
                  AVS_ADSP_HIPCCTL_BUSY, AVS_ADSP_HIPCCTL_BUSY);

    return IRQ_HANDLED;
}

static bool avs_ipc_is_busy(struct avs_ipc *ipc)
{
    struct avs_dev *adev = to_avs_dev(ipc->dev);
    u32 hipc_rsp;

    hipc_rsp = snd_hdac_adsp_readl(adev, SKL_ADSP_REG_HIPCT);
    return hipc_rsp & SKL_ADSP_HIPCT_BUSY;
}

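/*
 * Wait until the pending request is fully processed. A timeout with the
 * BUSY bit still set means firmware has responded but the bottom half has
 * not run yet, so keep waiting; a reply preempted by an incoming
 * notification triggers another wait, bounded by repeats_left.
 */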
static int avs_ipc_wait_busy_completion(struct avs_ipc *ipc, int timeout)
{
    u32 repeats_left = 128; /* to avoid infinite looping */
    int ret;

again:
    ret = wait_for_completion_timeout(&ipc->busy_completion, msecs_to_jiffies(timeout));

    /* DSP could be unresponsive at this point. */
    if (!ipc->ready)
        return -EPERM;

    if (!ret) {
        if (!avs_ipc_is_busy(ipc))
            return -ETIMEDOUT;
        /*
         * Firmware did its job, either notification or reply
         * has been received - now wait until it's processed.
         */
        wait_for_completion_killable(&ipc->busy_completion);
    }

    /* Ongoing notification's bottom-half may cause early wakeup */
    spin_lock(&ipc->rx_lock);
    if (!ipc->rx_completed) {
        if (repeats_left) {
            /* Reply delayed due to notification. */
            repeats_left--;
            reinit_completion(&ipc->busy_completion);
            spin_unlock(&ipc->rx_lock);
            goto again;
        }

        spin_unlock(&ipc->rx_lock);
        return -ETIMEDOUT;
    }

    spin_unlock(&ipc->rx_lock);
    return 0;
}

static void avs_ipc_msg_init(struct avs_ipc *ipc, struct avs_ipc_msg *reply)
{
    lockdep_assert_held(&ipc->rx_lock);

    ipc->rx.header = 0;
    ipc->rx.size = reply ? reply->size : 0;
    ipc->rx_completed = false;

    reinit_completion(&ipc->done_completion);
    reinit_completion(&ipc->busy_completion);
}

static void avs_dsp_send_tx(struct avs_dev *adev, struct avs_ipc_msg *tx, bool read_fwregs)
{
    u64 reg = ULONG_MAX;

    tx->header |= SKL_ADSP_HIPCI_BUSY;
    if (read_fwregs)
        reg = readq(avs_sram_addr(adev, AVS_FW_REGS_WINDOW));

    trace_avs_request(tx, reg);

    if (tx->size)
        memcpy_toio(avs_downlink_addr(adev), tx->data, tx->size);
    snd_hdac_adsp_writel(adev, SKL_ADSP_REG_HIPCIE, tx->header >> 32);
    snd_hdac_adsp_writel(adev, SKL_ADSP_REG_HIPCI, tx->header & UINT_MAX);
}

static int avs_dsp_do_send_msg(struct avs_dev *adev, struct avs_ipc_msg *request,
                   struct avs_ipc_msg *reply, int timeout)
{
    struct avs_ipc *ipc = adev->ipc;
    int ret;

    if (!ipc->ready)
        return -EPERM;

    mutex_lock(&ipc->msg_mutex);

    spin_lock(&ipc->rx_lock);
    avs_ipc_msg_init(ipc, reply);
    avs_dsp_send_tx(adev, request, true);
    spin_unlock(&ipc->rx_lock);

    ret = avs_ipc_wait_busy_completion(ipc, timeout);
    if (ret) {
        if (ret == -ETIMEDOUT) {
            union avs_notify_msg msg = AVS_NOTIFICATION(EXCEPTION_CAUGHT);

            /* Same treatment as on exception, just stack_dump=0. */
            avs_dsp_exception_caught(adev, &msg);
        }
        goto exit;
    }

    ret = ipc->rx.rsp.status;
    if (reply) {
        reply->header = ipc->rx.header;
        reply->size = ipc->rx.size;
        if (reply->data && ipc->rx.size)
            memcpy(reply->data, ipc->rx.data, reply->size);
    }

exit:
    mutex_unlock(&ipc->msg_mutex);
    return ret;
}


static int avs_dsp_send_msg_sequence(struct avs_dev *adev, struct avs_ipc_msg *request,
                     struct avs_ipc_msg *reply, int timeout, bool wake_d0i0,
                     bool schedule_d0ix)
{
    int ret;

    trace_avs_d0ix("wake", wake_d0i0, request->header);
    if (wake_d0i0) {
        ret = avs_dsp_wake_d0i0(adev, request);
        if (ret)
            return ret;
    }

    ret = avs_dsp_do_send_msg(adev, request, reply, timeout);
    if (ret)
        return ret;

    trace_avs_d0ix("schedule", schedule_d0ix, request->header);
    if (schedule_d0ix)
        avs_dsp_schedule_d0ix(adev, request);

    return 0;
}

int avs_dsp_send_msg_timeout(struct avs_dev *adev, struct avs_ipc_msg *request,
                 struct avs_ipc_msg *reply, int timeout)
{
    bool wake_d0i0 = avs_dsp_op(adev, d0ix_toggle, request, true);
    bool schedule_d0ix = avs_dsp_op(adev, d0ix_toggle, request, false);

    return avs_dsp_send_msg_sequence(adev, request, reply, timeout, wake_d0i0, schedule_d0ix);
}

int avs_dsp_send_msg(struct avs_dev *adev, struct avs_ipc_msg *request,
             struct avs_ipc_msg *reply)
{
    return avs_dsp_send_msg_timeout(adev, request, reply, adev->ipc->default_timeout_ms);
}

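/*
 * Illustrative example of caller code; the exact request encoding comes
 * from helpers in messages.h and the buffer below is an assumption:
 *
 *    struct avs_ipc_msg request = {0};
 *    struct avs_ipc_msg reply = {0};
 *
 *    request.header = ...;  // encoded IPC primary/extension words
 *    reply.data = buf;      // caller-owned buffer for the payload
 *    reply.size = buf_size;
 *    ret = avs_dsp_send_msg(adev, &request, &reply);
 */
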
int avs_dsp_send_pm_msg_timeout(struct avs_dev *adev, struct avs_ipc_msg *request,
                struct avs_ipc_msg *reply, int timeout, bool wake_d0i0)
{
    return avs_dsp_send_msg_sequence(adev, request, reply, timeout, wake_d0i0, false);
}

int avs_dsp_send_pm_msg(struct avs_dev *adev, struct avs_ipc_msg *request,
            struct avs_ipc_msg *reply, bool wake_d0i0)
{
    return avs_dsp_send_pm_msg_timeout(adev, request, reply, adev->ipc->default_timeout_ms,
                       wake_d0i0);
}

static int avs_dsp_do_send_rom_msg(struct avs_dev *adev, struct avs_ipc_msg *request, int timeout)
{
    struct avs_ipc *ipc = adev->ipc;
    int ret;

    mutex_lock(&ipc->msg_mutex);

    spin_lock(&ipc->rx_lock);
    avs_ipc_msg_init(ipc, NULL);
    /*
     * With HW still stalled, memory windows may not be configured
     * properly, so avoid accessing SRAM.
     */
    avs_dsp_send_tx(adev, request, false);
    spin_unlock(&ipc->rx_lock);

    /* ROM messages must be sent before the main core is unstalled */
    ret = avs_dsp_op(adev, stall, AVS_MAIN_CORE_MASK, false);
    if (!ret) {
        ret = wait_for_completion_timeout(&ipc->done_completion, msecs_to_jiffies(timeout));
        ret = ret ? 0 : -ETIMEDOUT;
    }

    mutex_unlock(&ipc->msg_mutex);

    return ret;
}

int avs_dsp_send_rom_msg_timeout(struct avs_dev *adev, struct avs_ipc_msg *request, int timeout)
{
    return avs_dsp_do_send_rom_msg(adev, request, timeout);
}

int avs_dsp_send_rom_msg(struct avs_dev *adev, struct avs_ipc_msg *request)
{
    return avs_dsp_send_rom_msg_timeout(adev, request, adev->ipc->default_timeout_ms);
}

void avs_dsp_interrupt_control(struct avs_dev *adev, bool enable)
{
    u32 value, mask;

    /*
     * No particular bit setting order. All of these are required
     * for functional SW <-> FW communication.
     */
    value = enable ? AVS_ADSP_ADSPIC_IPC : 0;
    snd_hdac_adsp_updatel(adev, AVS_ADSP_REG_ADSPIC, AVS_ADSP_ADSPIC_IPC, value);

    mask = AVS_ADSP_HIPCCTL_DONE | AVS_ADSP_HIPCCTL_BUSY;
    value = enable ? mask : 0;
    snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCCTL, mask, value);
}

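/*
 * Allocate the RX mailbox buffer and initialize all synchronization
 * primitives used by the IPC machinery. Expected to run before any
 * messages are sent or IPC interrupts serviced.
 */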
int avs_ipc_init(struct avs_ipc *ipc, struct device *dev)
{
    ipc->rx.data = devm_kzalloc(dev, AVS_MAILBOX_SIZE, GFP_KERNEL);
    if (!ipc->rx.data)
        return -ENOMEM;

    ipc->dev = dev;
    ipc->ready = false;
    ipc->default_timeout_ms = AVS_IPC_TIMEOUT_MS;
    INIT_WORK(&ipc->recovery_work, avs_dsp_recovery_work);
    INIT_DELAYED_WORK(&ipc->d0ix_work, avs_dsp_d0ix_work);
    init_completion(&ipc->done_completion);
    init_completion(&ipc->busy_completion);
    spin_lock_init(&ipc->rx_lock);
    mutex_init(&ipc->msg_mutex);

    return 0;
}

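/*
 * Mark the channel as unusable and cancel outstanding recovery and D0ix
 * work; new requests fail with -EPERM until firmware reports READY again.
 */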
void avs_ipc_block(struct avs_ipc *ipc)
{
    ipc->ready = false;
    cancel_work_sync(&ipc->recovery_work);
    cancel_delayed_work_sync(&ipc->d0ix_work);
    ipc->in_d0ix = false;
}