0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011 #include <linux/errno.h>
0012 #include <linux/module.h>
0013 #include <linux/types.h>
0014 #include <linux/init.h>
0015 #include <linux/device.h>
0016 #include <linux/miscdevice.h>
0017 #include <linux/mm.h>
0018 #include <linux/slab.h>
0019 #include <linux/fs.h>
0020 #include <linux/poll.h>
0021 #include <linux/sched/signal.h>
0022 #include <linux/ioctl.h>
0023 #include <linux/uaccess.h>
0024 #include <linux/pm_qos.h>
0025 #include <linux/hsi/hsi.h>
0026 #include <linux/hsi/ssi_protocol.h>
0027 #include <linux/hsi/cs-protocol.h>
0028
0029 #define CS_MMAP_SIZE PAGE_SIZE
0030
/* One queued notification message awaiting delivery to user space. */
struct char_queue {
	struct list_head list;	/* link in chardev_queue or dataind_queue */
	u32 msg;		/* packed CS protocol message word */
};
0035
/*
 * Driver-wide state for the single cmt_speech character device,
 * shared between the file operations and HSI completion callbacks
 * through the global cs_char_data instance.
 */
struct cs_char {
	unsigned int opened;		/* non-zero while the device is open */
	struct hsi_client *cl;
	struct cs_hsi_iface *hi;	/* HSI state, set up by cs_hsi_start() */
	struct list_head chardev_queue;	/* control notifications for read() */
	struct list_head dataind_queue;	/* data notifications for read() */
	int dataind_pending;		/* entries queued on dataind_queue */

	/* one zeroed page shared with user space via mmap */
	unsigned long mmap_base;
	unsigned long mmap_size;
	spinlock_t lock;		/* protects queues, counters, 'opened' */
	struct fasync_struct *async_queue;
	wait_queue_head_t wait;		/* readers/pollers sleep here */

	/* HSI channel ids used for control and data transfers */
	int channel_id_cmd;
	int channel_id_data;
};
0053
0054 #define SSI_CHANNEL_STATE_READING 1
0055 #define SSI_CHANNEL_STATE_WRITING (1 << 1)
0056 #define SSI_CHANNEL_STATE_POLL (1 << 2)
0057 #define SSI_CHANNEL_STATE_ERROR (1 << 3)
0058
0059 #define TARGET_MASK 0xf000000
0060 #define TARGET_REMOTE (1 << CS_DOMAIN_SHIFT)
0061 #define TARGET_LOCAL 0
0062
0063
0064 #define CS_MAX_CMDS 4
0065
0066
0067
0068
0069
0070 #define CS_QOS_LATENCY_FOR_DATA_USEC 20000
0071
0072
0073 #define CS_HSI_TRANSFER_TIMEOUT_MS 500
0074
0075
0076 #define RX_PTR_BOUNDARY_SHIFT 8
0077 #define RX_PTR_MAX_SHIFT (RX_PTR_BOUNDARY_SHIFT + \
0078 CS_MAX_BUFFERS_SHIFT)
/* Per-open HSI interface state; allocated by cs_hsi_start(). */
struct cs_hsi_iface {
	struct hsi_client *cl;		/* our HSI client */
	struct hsi_client *master;	/* SSI master used for the wake line */

	unsigned int iface_state;	/* CS_STATE_* */
	unsigned int wakeline_state;
	unsigned int control_state;	/* SSI_CHANNEL_STATE_* bits */
	unsigned int data_state;	/* SSI_CHANNEL_STATE_* bits */

	/* configuration block exposed to user space through mmap */
	struct cs_mmap_config_block *mmap_cfg;

	unsigned long mmap_base;
	unsigned long mmap_size;

	unsigned int rx_slot;		/* rolling RX pointer (see rx_ptr_boundary) */
	unsigned int tx_slot;

	/*
	 * Buffer parameters are kept here in addition to mmap_cfg so the
	 * driver never re-reads values user space could rewrite through
	 * the shared mmap page (copied in cs_hsi_buf_config()).
	 */
	unsigned int buf_size;
	unsigned int rx_bufs;
	unsigned int tx_bufs;
	unsigned int rx_ptr_boundary;
	unsigned int rx_offsets[CS_MAX_BUFFERS];
	unsigned int tx_offsets[CS_MAX_BUFFERS];

	/* size of one cache-aligned buffer slot in the mmap area */
	unsigned int slot_size;
	unsigned int flags;		/* CS_FEAT_* from cs_buffer_config */

	struct list_head cmdqueue;	/* free pool of control hsi_msgs */

	struct hsi_msg *data_rx_msg;
	struct hsi_msg *data_tx_msg;
	wait_queue_head_t datawait;	/* woken on data completion/flush */

	struct pm_qos_request pm_qos_req;

	spinlock_t lock;		/* protects the state fields above */
};
0120
0121 static struct cs_char cs_char_data;
0122
0123 static void cs_hsi_read_on_control(struct cs_hsi_iface *hi);
0124 static void cs_hsi_read_on_data(struct cs_hsi_iface *hi);
0125
/*
 * Compile-time check that the rolling RX pointer range
 * (1 << RX_PTR_MAX_SHIFT) still fits in an unsigned int.
 */
static inline void rx_ptr_shift_too_big(void)
{
	BUILD_BUG_ON((1LLU << RX_PTR_MAX_SHIFT) > UINT_MAX);
}
0130
/*
 * Queue a protocol message on @head and wake any reader.
 * Runs from HSI completion context, hence GFP_ATOMIC.  The message
 * is silently dropped when the device is not open or the allocation
 * fails (the error is only logged).
 */
static void cs_notify(u32 message, struct list_head *head)
{
	struct char_queue *entry;

	spin_lock(&cs_char_data.lock);

	if (!cs_char_data.opened) {
		spin_unlock(&cs_char_data.lock);
		goto out;
	}

	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry) {
		dev_err(&cs_char_data.cl->device,
			"Can't allocate new entry for the queue.\n");
		spin_unlock(&cs_char_data.lock);
		goto out;
	}

	entry->msg = message;
	list_add_tail(&entry->list, head);

	spin_unlock(&cs_char_data.lock);

	wake_up_interruptible(&cs_char_data.wait);
	kill_fasync(&cs_char_data.async_queue, SIGIO, POLL_IN);

out:
	return;
}
0161
0162 static u32 cs_pop_entry(struct list_head *head)
0163 {
0164 struct char_queue *entry;
0165 u32 data;
0166
0167 entry = list_entry(head->next, struct char_queue, list);
0168 data = entry->msg;
0169 list_del(&entry->list);
0170 kfree(entry);
0171
0172 return data;
0173 }
0174
/* Queue a control-channel message for the character device reader. */
static void cs_notify_control(u32 message)
{
	cs_notify(message, &cs_char_data.chardev_queue);
}
0179
/*
 * Queue a data notification and cap the backlog: when user space
 * falls behind, drop the oldest entries so that no more than
 * @maxlength notifications stay pending.
 */
static void cs_notify_data(u32 message, int maxlength)
{
	cs_notify(message, &cs_char_data.dataind_queue);

	spin_lock(&cs_char_data.lock);
	cs_char_data.dataind_pending++;
	while (cs_char_data.dataind_pending > maxlength &&
				!list_empty(&cs_char_data.dataind_queue)) {
		dev_dbg(&cs_char_data.cl->device, "data notification "
		"queue overrun (%u entries)\n", cs_char_data.dataind_pending);

		cs_pop_entry(&cs_char_data.dataind_queue);
		cs_char_data.dataind_pending--;
	}
	spin_unlock(&cs_char_data.lock);
}
0196
0197 static inline void cs_set_cmd(struct hsi_msg *msg, u32 cmd)
0198 {
0199 u32 *data = sg_virt(msg->sgt.sgl);
0200 *data = cmd;
0201 }
0202
0203 static inline u32 cs_get_cmd(struct hsi_msg *msg)
0204 {
0205 u32 *data = sg_virt(msg->sgt.sgl);
0206 return *data;
0207 }
0208
/* Return a finished control message to the free pool (cmdqueue). */
static void cs_release_cmd(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;

	list_add_tail(&msg->link, &hi->cmdqueue);
}
0215
/*
 * Destructor for control messages: called when a queued command is
 * flushed instead of completed (normally only while closing).
 * Clears the matching control-state bits and recycles the message.
 */
static void cs_cmd_destructor(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;

	spin_lock(&hi->lock);

	dev_dbg(&cs_char_data.cl->device, "control cmd destructor\n");

	/* a flush while open indicates something went wrong underneath */
	if (hi->iface_state != CS_STATE_CLOSED)
		dev_err(&hi->cl->device, "Cmd flushed while driver active\n");

	if (msg->ttype == HSI_MSG_READ)
		hi->control_state &=
			~(SSI_CHANNEL_STATE_POLL | SSI_CHANNEL_STATE_READING);
	else if (msg->ttype == HSI_MSG_WRITE &&
			hi->control_state & SSI_CHANNEL_STATE_WRITING)
		hi->control_state &= ~SSI_CHANNEL_STATE_WRITING;

	cs_release_cmd(msg);

	spin_unlock(&hi->lock);
}
0238
/*
 * Take a control message from the free pool and arm its destructor.
 * The pool is sized (CS_MAX_CMDS) so it cannot run empty while the
 * protocol state machine is obeyed, hence the BUG_ON.
 */
static struct hsi_msg *cs_claim_cmd(struct cs_hsi_iface* ssi)
{
	struct hsi_msg *msg;

	BUG_ON(list_empty(&ssi->cmdqueue));

	msg = list_first_entry(&ssi->cmdqueue, struct hsi_msg, link);
	list_del(&msg->link);
	msg->destructor = cs_cmd_destructor;

	return msg;
}
0251
/* Free every pooled control message and its one-word buffer. */
static void cs_free_cmds(struct cs_hsi_iface *ssi)
{
	struct hsi_msg *msg, *tmp;

	list_for_each_entry_safe(msg, tmp, &ssi->cmdqueue, link) {
		list_del(&msg->link);
		msg->destructor = NULL;
		kfree(sg_virt(msg->sgt.sgl));
		hsi_free_msg(msg);
	}
}
0263
/*
 * Pre-allocate the pool of CS_MAX_CMDS single-word control messages.
 * Returns 0 on success or -ENOMEM, in which case any partially
 * allocated messages are freed again.
 */
static int cs_alloc_cmds(struct cs_hsi_iface *hi)
{
	struct hsi_msg *msg;
	u32 *buf;
	unsigned int i;

	INIT_LIST_HEAD(&hi->cmdqueue);

	for (i = 0; i < CS_MAX_CMDS; i++) {
		msg = hsi_alloc_msg(1, GFP_KERNEL);
		if (!msg)
			goto out;
		buf = kmalloc(sizeof(*buf), GFP_KERNEL);
		if (!buf) {
			hsi_free_msg(msg);
			goto out;
		}
		sg_init_one(msg->sgt.sgl, buf, sizeof(*buf));
		msg->channel = cs_char_data.channel_id_cmd;
		msg->context = hi;
		list_add_tail(&msg->link, &hi->cmdqueue);
	}

	return 0;

out:
	cs_free_cmds(hi);
	return -ENOMEM;
}
0293
0294 static void cs_hsi_data_destructor(struct hsi_msg *msg)
0295 {
0296 struct cs_hsi_iface *hi = msg->context;
0297 const char *dir = (msg->ttype == HSI_MSG_READ) ? "TX" : "RX";
0298
0299 dev_dbg(&cs_char_data.cl->device, "Freeing data %s message\n", dir);
0300
0301 spin_lock(&hi->lock);
0302 if (hi->iface_state != CS_STATE_CLOSED)
0303 dev_err(&cs_char_data.cl->device,
0304 "Data %s flush while device active\n", dir);
0305 if (msg->ttype == HSI_MSG_READ)
0306 hi->data_state &=
0307 ~(SSI_CHANNEL_STATE_POLL | SSI_CHANNEL_STATE_READING);
0308 else
0309 hi->data_state &= ~SSI_CHANNEL_STATE_WRITING;
0310
0311 msg->status = HSI_STATUS_COMPLETED;
0312 if (unlikely(waitqueue_active(&hi->datawait)))
0313 wake_up_interruptible(&hi->datawait);
0314
0315 spin_unlock(&hi->lock);
0316 }
0317
/*
 * Allocate the two long-lived data messages (one RX, one TX).
 * Their scatterlists are (re)pointed at mmap buffer slots just
 * before each transfer.  Returns 0 or -ENOMEM.
 */
static int cs_hsi_alloc_data(struct cs_hsi_iface *hi)
{
	struct hsi_msg *txmsg, *rxmsg;
	int res = 0;

	rxmsg = hsi_alloc_msg(1, GFP_KERNEL);
	if (!rxmsg) {
		res = -ENOMEM;
		goto out1;
	}
	rxmsg->channel = cs_char_data.channel_id_data;
	rxmsg->destructor = cs_hsi_data_destructor;
	rxmsg->context = hi;

	txmsg = hsi_alloc_msg(1, GFP_KERNEL);
	if (!txmsg) {
		res = -ENOMEM;
		goto out2;
	}
	txmsg->channel = cs_char_data.channel_id_data;
	txmsg->destructor = cs_hsi_data_destructor;
	txmsg->context = hi;

	hi->data_rx_msg = rxmsg;
	hi->data_tx_msg = txmsg;

	return 0;

out2:
	hsi_free_msg(rxmsg);
out1:
	return res;
}
0351
/* Free one data message; it must no longer be in flight. */
static void cs_hsi_free_data_msg(struct hsi_msg *msg)
{
	WARN_ON(msg->status != HSI_STATUS_COMPLETED &&
					msg->status != HSI_STATUS_ERROR);
	hsi_free_msg(msg);
}
0358
/* Release both pre-allocated data messages. */
static void cs_hsi_free_data(struct cs_hsi_iface *hi)
{
	cs_hsi_free_data_msg(hi->data_rx_msg);
	cs_hsi_free_data_msg(hi->data_tx_msg);
}
0364
/*
 * Common entry for the error handlers below: takes hi->lock (the
 * matching __cs_hsi_error_post() releases it) and logs the failure.
 */
static inline void __cs_hsi_error_pre(struct cs_hsi_iface *hi,
					struct hsi_msg *msg, const char *info,
					unsigned int *state)
{
	spin_lock(&hi->lock);
	dev_err(&hi->cl->device, "HSI %s error, msg %d, state %u\n",
		info, msg->status, *state);
}
0373
/* Common exit for the error handlers: drops the lock taken in _pre(). */
static inline void __cs_hsi_error_post(struct cs_hsi_iface *hi)
{
	spin_unlock(&hi->lock);
}
0378
0379 static inline void __cs_hsi_error_read_bits(unsigned int *state)
0380 {
0381 *state |= SSI_CHANNEL_STATE_ERROR;
0382 *state &= ~(SSI_CHANNEL_STATE_READING | SSI_CHANNEL_STATE_POLL);
0383 }
0384
0385 static inline void __cs_hsi_error_write_bits(unsigned int *state)
0386 {
0387 *state |= SSI_CHANNEL_STATE_ERROR;
0388 *state &= ~SSI_CHANNEL_STATE_WRITING;
0389 }
0390
/* Handle a failed control read: recycle the command, latch ERROR. */
static void cs_hsi_control_read_error(struct cs_hsi_iface *hi,
					struct hsi_msg *msg)
{
	__cs_hsi_error_pre(hi, msg, "control read", &hi->control_state);
	cs_release_cmd(msg);
	__cs_hsi_error_read_bits(&hi->control_state);
	__cs_hsi_error_post(hi);
}
0399
/* Handle a failed control write: recycle the command, latch ERROR. */
static void cs_hsi_control_write_error(struct cs_hsi_iface *hi,
					struct hsi_msg *msg)
{
	__cs_hsi_error_pre(hi, msg, "control write", &hi->control_state);
	cs_release_cmd(msg);
	__cs_hsi_error_write_bits(&hi->control_state);
	__cs_hsi_error_post(hi);

}
0409
/* Handle a failed data read: latch ERROR on the data channel. */
static void cs_hsi_data_read_error(struct cs_hsi_iface *hi, struct hsi_msg *msg)
{
	__cs_hsi_error_pre(hi, msg, "data read", &hi->data_state);
	__cs_hsi_error_read_bits(&hi->data_state);
	__cs_hsi_error_post(hi);
}
0416
/* Handle a failed data write: latch ERROR on the data channel. */
static void cs_hsi_data_write_error(struct cs_hsi_iface *hi,
					struct hsi_msg *msg)
{
	__cs_hsi_error_pre(hi, msg, "data write", &hi->data_state);
	__cs_hsi_error_write_bits(&hi->data_state);
	__cs_hsi_error_post(hi);
}
0424
/*
 * Completion handler for a full one-word control read.  Releases the
 * command back to the pool, optionally timestamps the arrival in the
 * shared mmap block, notifies user space and re-arms the control read.
 */
static void cs_hsi_read_on_control_complete(struct hsi_msg *msg)
{
	u32 cmd = cs_get_cmd(msg);
	struct cs_hsi_iface *hi = msg->context;

	spin_lock(&hi->lock);
	hi->control_state &= ~SSI_CHANNEL_STATE_READING;
	if (msg->status == HSI_STATUS_ERROR) {
		dev_err(&hi->cl->device, "Control RX error detected\n");
		spin_unlock(&hi->lock);
		cs_hsi_control_read_error(hi, msg);
		goto out;
	}
	dev_dbg(&hi->cl->device, "Read on control: %08X\n", cmd);
	cs_release_cmd(msg);
	if (hi->flags & CS_FEAT_TSTAMP_RX_CTRL) {
		/* export the RX timestamp to user space via the mmap block */
		struct timespec64 tspec;
		struct cs_timestamp *tstamp =
			&hi->mmap_cfg->tstamp_rx_ctrl;

		ktime_get_ts64(&tspec);

		tstamp->tv_sec = (__u32) tspec.tv_sec;
		tstamp->tv_nsec = (__u32) tspec.tv_nsec;
	}
	spin_unlock(&hi->lock);

	cs_notify_control(cmd);

out:
	cs_hsi_read_on_control(hi);
}
0457
/*
 * Completion handler for the zero-length "peek" on the control
 * channel: data has arrived, so issue the real one-word read.
 */
static void cs_hsi_peek_on_control_complete(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;
	int ret;

	if (msg->status == HSI_STATUS_ERROR) {
		dev_err(&hi->cl->device, "Control peek RX error detected\n");
		cs_hsi_control_read_error(hi, msg);
		return;
	}

	WARN_ON(!(hi->control_state & SSI_CHANNEL_STATE_READING));

	dev_dbg(&hi->cl->device, "Peek on control complete, reading\n");
	msg->sgt.nents = 1;
	msg->complete = cs_hsi_read_on_control_complete;
	ret = hsi_async_read(hi->cl, msg);
	if (ret)
		cs_hsi_control_read_error(hi, msg);
}
0478
/*
 * Arm a control-channel read, starting with a zero-length peek so
 * the command buffer is only consumed when data actually arrives.
 * No-op (with an error log) when a read is already pending or the
 * channel is in error state.
 */
static void cs_hsi_read_on_control(struct cs_hsi_iface *hi)
{
	struct hsi_msg *msg;
	int ret;

	spin_lock(&hi->lock);
	if (hi->control_state & SSI_CHANNEL_STATE_READING) {
		dev_err(&hi->cl->device, "Control read already pending (%d)\n",
			hi->control_state);
		spin_unlock(&hi->lock);
		return;
	}
	if (hi->control_state & SSI_CHANNEL_STATE_ERROR) {
		dev_err(&hi->cl->device, "Control read error (%d)\n",
			hi->control_state);
		spin_unlock(&hi->lock);
		return;
	}
	hi->control_state |= SSI_CHANNEL_STATE_READING;
	dev_dbg(&hi->cl->device, "Issuing RX on control\n");
	msg = cs_claim_cmd(hi);
	spin_unlock(&hi->lock);

	/* zero-length transfer: peek for incoming data */
	msg->sgt.nents = 0;
	msg->complete = cs_hsi_peek_on_control_complete;
	ret = hsi_async_read(hi->cl, msg);
	if (ret)
		cs_hsi_control_read_error(hi, msg);
}
0508
/* Completion handler for a control write: recycle or report error. */
static void cs_hsi_write_on_control_complete(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;
	if (msg->status == HSI_STATUS_COMPLETED) {
		spin_lock(&hi->lock);
		hi->control_state &= ~SSI_CHANNEL_STATE_WRITING;
		cs_release_cmd(msg);
		spin_unlock(&hi->lock);
	} else if (msg->status == HSI_STATUS_ERROR) {
		cs_hsi_control_write_error(hi, msg);
	} else {
		dev_err(&hi->cl->device,
			"unexpected status in control write callback %d\n",
			msg->status);
	}
}
0525
/*
 * Send one command word on the control channel.
 * Returns 0 when the write was queued, -EIO when the channel is in
 * error state, -EBUSY when a write is already pending.
 */
static int cs_hsi_write_on_control(struct cs_hsi_iface *hi, u32 message)
{
	struct hsi_msg *msg;
	int ret;

	spin_lock(&hi->lock);
	if (hi->control_state & SSI_CHANNEL_STATE_ERROR) {
		spin_unlock(&hi->lock);
		return -EIO;
	}
	if (hi->control_state & SSI_CHANNEL_STATE_WRITING) {
		dev_err(&hi->cl->device,
			"Write still pending on control channel.\n");
		spin_unlock(&hi->lock);
		return -EBUSY;
	}
	hi->control_state |= SSI_CHANNEL_STATE_WRITING;
	msg = cs_claim_cmd(hi);
	spin_unlock(&hi->lock);

	cs_set_cmd(msg, message);
	msg->sgt.nents = 1;
	msg->complete = cs_hsi_write_on_control_complete;
	dev_dbg(&hi->cl->device,
		"Sending control message %08X\n", message);
	ret = hsi_async_write(hi->cl, msg);
	if (ret) {
		dev_err(&hi->cl->device,
			"async_write failed with %d\n", ret);
		cs_hsi_control_write_error(hi, msg);
	}

	/*
	 * Ensure a control read stays armed after issuing a write: a
	 * flushed read cannot be resubmitted from its destructor (see
	 * cs_cmd_destructor()), so it is restarted here instead.
	 * NOTE(review): control_state is tested here without hi->lock —
	 * presumably a benign race; confirm against the locking rules.
	 */
	if (!(hi->control_state & SSI_CHANNEL_STATE_READING)) {
		dev_err(&hi->cl->device, "Restarting control reads\n");
		cs_hsi_read_on_control(hi);
	}

	return 0;
}
0572
/*
 * Completion handler for a data read: advance the rolling RX pointer,
 * publish it in the shared mmap block, notify user space and re-arm
 * the next data read.
 */
static void cs_hsi_read_on_data_complete(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;
	u32 payload;

	if (unlikely(msg->status == HSI_STATUS_ERROR)) {
		cs_hsi_data_read_error(hi, msg);
		return;
	}

	spin_lock(&hi->lock);
	WARN_ON(!(hi->data_state & SSI_CHANNEL_STATE_READING));
	hi->data_state &= ~SSI_CHANNEL_STATE_READING;
	payload = CS_RX_DATA_RECEIVED;
	payload |= hi->rx_slot;
	hi->rx_slot++;
	hi->rx_slot %= hi->rx_ptr_boundary;
	/* expose the updated RX pointer to user space */
	hi->mmap_cfg->rx_ptr = hi->rx_slot;
	if (unlikely(waitqueue_active(&hi->datawait)))
		wake_up_interruptible(&hi->datawait);
	spin_unlock(&hi->lock);

	cs_notify_data(payload, hi->rx_bufs);
	cs_hsi_read_on_data(hi);
}
0599
/*
 * Completion handler for the zero-length data "peek": point the
 * scatterlist at the next RX buffer slot in the mmap area and issue
 * the real read.
 */
static void cs_hsi_peek_on_data_complete(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;
	u32 *address;
	int ret;

	if (unlikely(msg->status == HSI_STATUS_ERROR)) {
		cs_hsi_data_read_error(hi, msg);
		return;
	}
	if (unlikely(hi->iface_state != CS_STATE_CONFIGURED)) {
		dev_err(&hi->cl->device, "Data received in invalid state\n");
		cs_hsi_data_read_error(hi, msg);
		return;
	}

	spin_lock(&hi->lock);
	WARN_ON(!(hi->data_state & SSI_CHANNEL_STATE_POLL));
	hi->data_state &= ~SSI_CHANNEL_STATE_POLL;
	hi->data_state |= SSI_CHANNEL_STATE_READING;
	spin_unlock(&hi->lock);

	address = (u32 *)(hi->mmap_base +
				hi->rx_offsets[hi->rx_slot % hi->rx_bufs]);
	sg_init_one(msg->sgt.sgl, address, hi->buf_size);
	msg->sgt.nents = 1;
	msg->complete = cs_hsi_read_on_data_complete;
	ret = hsi_async_read(hi->cl, msg);
	if (ret)
		cs_hsi_data_read_error(hi, msg);
}
0631
0632
0633
0634
0635
0636 static inline int cs_state_xfer_active(unsigned int state)
0637 {
0638 return (state & SSI_CHANNEL_STATE_WRITING) ||
0639 (state & SSI_CHANNEL_STATE_READING);
0640 }
0641
0642
0643
0644
0645 static inline int cs_state_idle(unsigned int state)
0646 {
0647 return !(state & ~SSI_CHANNEL_STATE_ERROR);
0648 }
0649
/*
 * Arm a data read, starting with a zero-length peek (the real buffer
 * is attached in cs_hsi_peek_on_data_complete()).  No-op when a read
 * or poll is already pending.
 */
static void cs_hsi_read_on_data(struct cs_hsi_iface *hi)
{
	struct hsi_msg *rxmsg;
	int ret;

	spin_lock(&hi->lock);
	if (hi->data_state &
		(SSI_CHANNEL_STATE_READING | SSI_CHANNEL_STATE_POLL)) {
		dev_dbg(&hi->cl->device, "Data read already pending (%u)\n",
			hi->data_state);
		spin_unlock(&hi->lock);
		return;
	}
	hi->data_state |= SSI_CHANNEL_STATE_POLL;
	spin_unlock(&hi->lock);

	rxmsg = hi->data_rx_msg;
	sg_init_one(rxmsg->sgt.sgl, (void *)hi->mmap_base, 0);
	rxmsg->sgt.nents = 0;
	rxmsg->complete = cs_hsi_peek_on_data_complete;

	ret = hsi_async_read(hi->cl, rxmsg);
	if (ret)
		cs_hsi_data_read_error(hi, rxmsg);
}
0675
/*
 * Completion handler for a data write: clear the WRITING bit and wake
 * any waiter in cs_hsi_data_sync().
 */
static void cs_hsi_write_on_data_complete(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;

	if (msg->status == HSI_STATUS_COMPLETED) {
		spin_lock(&hi->lock);
		hi->data_state &= ~SSI_CHANNEL_STATE_WRITING;
		if (unlikely(waitqueue_active(&hi->datawait)))
			wake_up_interruptible(&hi->datawait);
		spin_unlock(&hi->lock);
	} else {
		cs_hsi_data_write_error(hi, msg);
	}
}
0690
/*
 * Queue a data write from TX buffer @slot in the mmap area.
 * Returns 0 when queued, -EINVAL if the interface is not configured,
 * -EIO on a latched channel error (also re-reported via
 * cs_hsi_data_write_error()), -EBUSY if a write is already pending.
 */
static int cs_hsi_write_on_data(struct cs_hsi_iface *hi, unsigned int slot)
{
	u32 *address;
	struct hsi_msg *txmsg;
	int ret;

	spin_lock(&hi->lock);
	if (hi->iface_state != CS_STATE_CONFIGURED) {
		dev_err(&hi->cl->device, "Not configured, aborting\n");
		ret = -EINVAL;
		goto error;
	}
	if (hi->data_state & SSI_CHANNEL_STATE_ERROR) {
		dev_err(&hi->cl->device, "HSI error, aborting\n");
		ret = -EIO;
		goto error;
	}
	if (hi->data_state & SSI_CHANNEL_STATE_WRITING) {
		dev_err(&hi->cl->device, "Write pending on data channel.\n");
		ret = -EBUSY;
		goto error;
	}
	hi->data_state |= SSI_CHANNEL_STATE_WRITING;
	spin_unlock(&hi->lock);

	hi->tx_slot = slot;
	address = (u32 *)(hi->mmap_base + hi->tx_offsets[hi->tx_slot]);
	txmsg = hi->data_tx_msg;
	sg_init_one(txmsg->sgt.sgl, address, hi->buf_size);
	txmsg->complete = cs_hsi_write_on_data_complete;
	ret = hsi_async_write(hi->cl, txmsg);
	if (ret)
		cs_hsi_data_write_error(hi, txmsg);

	return ret;

error:
	spin_unlock(&hi->lock);
	if (ret == -EIO)
		cs_hsi_data_write_error(hi, hi->data_tx_msg);

	return ret;
}
0734
/* Return the current interface state (lockless single-word read). */
static unsigned int cs_hsi_get_state(struct cs_hsi_iface *hi)
{
	return hi->iface_state;
}
0739
/*
 * Dispatch a command from user space: remote-targeted words go out
 * on the control channel, local CS_TX_DATA_READY triggers a data
 * write.  Runs with BHs disabled to match the completion context.
 */
static int cs_hsi_command(struct cs_hsi_iface *hi, u32 cmd)
{
	int ret = 0;

	local_bh_disable();
	switch (cmd & TARGET_MASK) {
	case TARGET_REMOTE:
		ret = cs_hsi_write_on_control(hi, cmd);
		break;
	case TARGET_LOCAL:
		if ((cmd & CS_CMD_MASK) == CS_TX_DATA_READY)
			ret = cs_hsi_write_on_data(hi, cmd & CS_PARAM_MASK);
		else
			ret = -EINVAL;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	local_bh_enable();

	return ret;
}
0763
/*
 * Raise or drop the SSI wake line via the master client.  The
 * ssip_slave_*_tx() calls are made outside hi->lock, only when the
 * cached state actually changed.
 */
static void cs_hsi_set_wakeline(struct cs_hsi_iface *hi, bool new_state)
{
	int change = 0;

	spin_lock_bh(&hi->lock);
	if (hi->wakeline_state != new_state) {
		hi->wakeline_state = new_state;
		change = 1;
		dev_dbg(&hi->cl->device, "setting wake line to %d (%p)\n",
			new_state, hi->cl);
	}
	spin_unlock_bh(&hi->lock);

	if (change) {
		if (new_state)
			ssip_slave_start_tx(hi->master);
		else
			ssip_slave_stop_tx(hi->master);
	}

	dev_dbg(&hi->cl->device, "wake line set to %d (%p)\n",
		new_state, hi->cl);
}
0787
/*
 * Record the buffer counts both in driver state and in the mmap
 * block, and derive the RX pointer wrap boundary.
 */
static void set_buffer_sizes(struct cs_hsi_iface *hi, int rx_bufs, int tx_bufs)
{
	hi->rx_bufs = rx_bufs;
	hi->tx_bufs = tx_bufs;
	hi->mmap_cfg->rx_bufs = rx_bufs;
	hi->mmap_cfg->tx_bufs = tx_bufs;

	if (hi->flags & CS_FEAT_ROLLING_RX_COUNTER) {
		/*
		 * With the rolling-counter feature the RX pointer wraps
		 * at rx_bufs << RX_PTR_BOUNDARY_SHIFT instead of rx_bufs,
		 * and the boundary is exported to user space (fits in
		 * unsigned int per rx_ptr_shift_too_big()).
		 */
		hi->rx_ptr_boundary = (rx_bufs << RX_PTR_BOUNDARY_SHIFT);
		hi->mmap_cfg->rx_ptr_boundary = hi->rx_ptr_boundary;
	} else {
		hi->rx_ptr_boundary = hi->rx_bufs;
	}
}
0809
/*
 * Validate a requested buffer configuration against CS_MAX_BUFFERS
 * and the available mmap space.  Returns 0, -EINVAL or -ENOBUFS.
 * NOTE(review): buf_size_aligned is computed before the count check;
 * presumably the ioctl-sized inputs keep this from overflowing —
 * confirm the ranges in cs_buffer_config.
 */
static int check_buf_params(struct cs_hsi_iface *hi,
					const struct cs_buffer_config *buf_cfg)
{
	size_t buf_size_aligned = L1_CACHE_ALIGN(buf_cfg->buf_size) *
					(buf_cfg->rx_bufs + buf_cfg->tx_bufs);
	size_t ctrl_size_aligned = L1_CACHE_ALIGN(sizeof(*hi->mmap_cfg));
	int r = 0;

	if (buf_cfg->rx_bufs > CS_MAX_BUFFERS ||
					buf_cfg->tx_bufs > CS_MAX_BUFFERS) {
		r = -EINVAL;
	} else if ((buf_size_aligned + ctrl_size_aligned) >= hi->mmap_size) {
		dev_err(&hi->cl->device, "No space for the requested buffer "
			"configuration\n");
		r = -ENOBUFS;
	}

	return r;
}
0829
0830
0831
0832
/*
 * Wait until no data transfer is in flight.  Each round sleeps on
 * hi->datawait for at most CS_HSI_TRANSFER_TIMEOUT_MS.  Returns 0
 * when idle, -ERESTARTSYS on a pending signal, -EIO on timeout.
 * Called with hi->lock released; takes and releases it internally.
 */
static int cs_hsi_data_sync(struct cs_hsi_iface *hi)
{
	int r = 0;

	spin_lock_bh(&hi->lock);

	if (!cs_state_xfer_active(hi->data_state)) {
		dev_dbg(&hi->cl->device, "hsi_data_sync break, idle\n");
		goto out;
	}

	for (;;) {
		int s;
		DEFINE_WAIT(wait);
		if (!cs_state_xfer_active(hi->data_state))
			goto out;
		if (signal_pending(current)) {
			r = -ERESTARTSYS;
			goto out;
		}
		/*
		 * prepare_to_wait() is called under hi->lock so the state
		 * check above and the wake-ups from the completion
		 * handlers cannot race with going to sleep.
		 */
		prepare_to_wait(&hi->datawait, &wait, TASK_INTERRUPTIBLE);
		spin_unlock_bh(&hi->lock);
		s = schedule_timeout(
			msecs_to_jiffies(CS_HSI_TRANSFER_TIMEOUT_MS));
		spin_lock_bh(&hi->lock);
		finish_wait(&hi->datawait, &wait);
		if (!s) {
			dev_dbg(&hi->cl->device,
				"hsi_data_sync timeout after %d ms\n",
				CS_HSI_TRANSFER_TIMEOUT_MS);
			r = -EIO;
			goto out;
		}
	}

out:
	spin_unlock_bh(&hi->lock);
	dev_dbg(&hi->cl->device, "hsi_data_sync done with res %d\n", r);

	return r;
}
0878
0879 static void cs_hsi_data_enable(struct cs_hsi_iface *hi,
0880 struct cs_buffer_config *buf_cfg)
0881 {
0882 unsigned int data_start, i;
0883
0884 BUG_ON(hi->buf_size == 0);
0885
0886 set_buffer_sizes(hi, buf_cfg->rx_bufs, buf_cfg->tx_bufs);
0887
0888 hi->slot_size = L1_CACHE_ALIGN(hi->buf_size);
0889 dev_dbg(&hi->cl->device,
0890 "setting slot size to %u, buf size %u, align %u\n",
0891 hi->slot_size, hi->buf_size, L1_CACHE_BYTES);
0892
0893 data_start = L1_CACHE_ALIGN(sizeof(*hi->mmap_cfg));
0894 dev_dbg(&hi->cl->device,
0895 "setting data start at %u, cfg block %u, align %u\n",
0896 data_start, sizeof(*hi->mmap_cfg), L1_CACHE_BYTES);
0897
0898 for (i = 0; i < hi->mmap_cfg->rx_bufs; i++) {
0899 hi->rx_offsets[i] = data_start + i * hi->slot_size;
0900 hi->mmap_cfg->rx_offsets[i] = hi->rx_offsets[i];
0901 dev_dbg(&hi->cl->device, "DL buf #%u at %u\n",
0902 i, hi->rx_offsets[i]);
0903 }
0904 for (i = 0; i < hi->mmap_cfg->tx_bufs; i++) {
0905 hi->tx_offsets[i] = data_start +
0906 (i + hi->mmap_cfg->rx_bufs) * hi->slot_size;
0907 hi->mmap_cfg->tx_offsets[i] = hi->tx_offsets[i];
0908 dev_dbg(&hi->cl->device, "UL buf #%u at %u\n",
0909 i, hi->rx_offsets[i]);
0910 }
0911
0912 hi->iface_state = CS_STATE_CONFIGURED;
0913 }
0914
/* Drop back from CONFIGURED to OPENED when buffers are released. */
static void cs_hsi_data_disable(struct cs_hsi_iface *hi, int old_state)
{
	if (old_state == CS_STATE_CONFIGURED) {
		dev_dbg(&hi->cl->device,
			"closing data channel with slot size 0\n");
		hi->iface_state = CS_STATE_OPENED;
	}
}
0923
/*
 * Apply a buffer configuration from the CS_CONFIG_BUFS ioctl.
 * Quiesces ongoing data transfers first, then rebuilds the mmap
 * layout (buf_size != 0) or tears it down (buf_size == 0), and
 * adds/removes the CPU latency QoS request on state transitions.
 */
static int cs_hsi_buf_config(struct cs_hsi_iface *hi,
					struct cs_buffer_config *buf_cfg)
{
	int r = 0;
	unsigned int old_state = hi->iface_state;

	spin_lock_bh(&hi->lock);
	/* Leave CONFIGURED state so no new transfers get started */
	if (old_state == CS_STATE_CONFIGURED)
		hi->iface_state = CS_STATE_OPENED;
	spin_unlock_bh(&hi->lock);

	/*
	 * Make sure in-flight transfers have finished before the buffer
	 * layout underneath them is changed.
	 */
	r = cs_hsi_data_sync(hi);
	if (r < 0)
		return r;

	WARN_ON(cs_state_xfer_active(hi->data_state));

	spin_lock_bh(&hi->lock);
	r = check_buf_params(hi, buf_cfg);
	if (r < 0)
		goto error;

	hi->buf_size = buf_cfg->buf_size;
	hi->mmap_cfg->buf_size = hi->buf_size;
	hi->flags = buf_cfg->flags;

	hi->rx_slot = 0;
	hi->tx_slot = 0;
	hi->slot_size = 0;

	if (hi->buf_size)
		cs_hsi_data_enable(hi, buf_cfg);
	else
		cs_hsi_data_disable(hi, old_state);

	spin_unlock_bh(&hi->lock);

	if (old_state != hi->iface_state) {
		if (hi->iface_state == CS_STATE_CONFIGURED) {
			/* low latency needed while speech data flows */
			cpu_latency_qos_add_request(&hi->pm_qos_req,
				CS_QOS_LATENCY_FOR_DATA_USEC);
			local_bh_disable();
			cs_hsi_read_on_data(hi);
			local_bh_enable();
		} else if (old_state == CS_STATE_CONFIGURED) {
			cpu_latency_qos_remove_request(&hi->pm_qos_req);
		}
	}
	return r;

error:
	spin_unlock_bh(&hi->lock);
	return r;
}
0983
/*
 * Bring up the HSI interface for an open(): allocate state, command
 * and data messages, claim the port, resolve the SSI master and arm
 * the first control read.  On success *hi is set and 0 returned;
 * on failure everything is unwound and a -errno returned.
 */
static int cs_hsi_start(struct cs_hsi_iface **hi, struct hsi_client *cl,
			unsigned long mmap_base, unsigned long mmap_size)
{
	int err = 0;
	struct cs_hsi_iface *hsi_if = kzalloc(sizeof(*hsi_if), GFP_KERNEL);

	dev_dbg(&cl->device, "cs_hsi_start\n");

	if (!hsi_if) {
		err = -ENOMEM;
		goto leave0;
	}
	spin_lock_init(&hsi_if->lock);
	hsi_if->cl = cl;
	hsi_if->iface_state = CS_STATE_CLOSED;
	/* config block lives at the start of the shared mmap page */
	hsi_if->mmap_cfg = (struct cs_mmap_config_block *)mmap_base;
	hsi_if->mmap_base = mmap_base;
	hsi_if->mmap_size = mmap_size;
	memset(hsi_if->mmap_cfg, 0, sizeof(*hsi_if->mmap_cfg));
	init_waitqueue_head(&hsi_if->datawait);
	err = cs_alloc_cmds(hsi_if);
	if (err < 0) {
		dev_err(&cl->device, "Unable to alloc HSI messages\n");
		goto leave1;
	}
	err = cs_hsi_alloc_data(hsi_if);
	if (err < 0) {
		dev_err(&cl->device, "Unable to alloc HSI messages for data\n");
		goto leave2;
	}
	err = hsi_claim_port(cl, 1);
	if (err < 0) {
		dev_err(&cl->device,
				"Could not open, HSI port already claimed\n");
		goto leave3;
	}
	hsi_if->master = ssip_slave_get_master(cl);
	if (IS_ERR(hsi_if->master)) {
		err = PTR_ERR(hsi_if->master);
		dev_err(&cl->device, "Could not get HSI master client\n");
		goto leave4;
	}
	if (!ssip_slave_running(hsi_if->master)) {
		err = -ENODEV;
		dev_err(&cl->device,
				"HSI port not initialized\n");
		goto leave4;
	}

	hsi_if->iface_state = CS_STATE_OPENED;
	local_bh_disable();
	cs_hsi_read_on_control(hsi_if);
	local_bh_enable();

	dev_dbg(&cl->device, "cs_hsi_start...done\n");

	BUG_ON(!hi);
	*hi = hsi_if;

	return 0;

leave4:
	hsi_release_port(cl);
leave3:
	cs_hsi_free_data(hsi_if);
leave2:
	cs_free_cmds(hsi_if);
leave1:
	kfree(hsi_if);
leave0:
	dev_dbg(&cl->device, "cs_hsi_start...done/error\n\n");

	return err;
}
1058
/*
 * Tear down the HSI interface on release(): drop the wake line,
 * release the port (which flushes pending messages through the
 * destructors) and free all resources.
 */
static void cs_hsi_stop(struct cs_hsi_iface *hi)
{
	dev_dbg(&hi->cl->device, "cs_hsi_stop\n");
	cs_hsi_set_wakeline(hi, 0);
	ssip_slave_put_master(hi->master);

	/* hsi_release_port() needs CS_STATE_CLOSED so the destructors
	 * know this flush is intentional */
	hi->iface_state = CS_STATE_CLOSED;
	hsi_release_port(hi->cl);

	/*
	 * The destructors ran during hsi_release_port(), so both
	 * channels should be idle (modulo a latched error bit) by now.
	 */
	WARN_ON(!cs_state_idle(hi->control_state));
	WARN_ON(!cs_state_idle(hi->data_state));

	if (cpu_latency_qos_request_active(&hi->pm_qos_req))
		cpu_latency_qos_remove_request(&hi->pm_qos_req);

	spin_lock_bh(&hi->lock);
	cs_hsi_free_data(hi);
	cs_free_cmds(hi);
	spin_unlock_bh(&hi->lock);
	kfree(hi);
}
1086
/*
 * mmap fault handler: every fault maps the single shared page at
 * mmap_base (the area is exactly one page, see cs_char_mmap()).
 */
static vm_fault_t cs_char_vma_fault(struct vm_fault *vmf)
{
	struct cs_char *csdata = vmf->vma->vm_private_data;
	struct page *page;

	page = virt_to_page(csdata->mmap_base);
	get_page(page);
	vmf->page = page;

	return 0;
}
1098
/* VM operations for the shared-page mapping. */
static const struct vm_operations_struct cs_char_vm_ops = {
	.fault = cs_char_vma_fault,
};
1102
1103 static int cs_char_fasync(int fd, struct file *file, int on)
1104 {
1105 struct cs_char *csdata = file->private_data;
1106
1107 if (fasync_helper(fd, file, on, &csdata->async_queue) < 0)
1108 return -EIO;
1109
1110 return 0;
1111 }
1112
/*
 * poll(): readable when either notification queue has entries.
 * Sleeps on the global cs_char_data.wait, the queue the completion
 * handlers wake.
 */
static __poll_t cs_char_poll(struct file *file, poll_table *wait)
{
	struct cs_char *csdata = file->private_data;
	__poll_t ret = 0;

	poll_wait(file, &cs_char_data.wait, wait);
	spin_lock_bh(&csdata->lock);
	if (!list_empty(&csdata->chardev_queue))
		ret = EPOLLIN | EPOLLRDNORM;
	else if (!list_empty(&csdata->dataind_queue))
		ret = EPOLLIN | EPOLLRDNORM;
	spin_unlock_bh(&csdata->lock);

	return ret;
}
1128
/*
 * read(): block until a notification word is available (control
 * messages take priority over data indications) and copy it to user
 * space.  Honours O_NONBLOCK and pending signals.
 */
static ssize_t cs_char_read(struct file *file, char __user *buf, size_t count,
								loff_t *unused)
{
	struct cs_char *csdata = file->private_data;
	u32 data;
	ssize_t retval;

	if (count < sizeof(data))
		return -EINVAL;

	for (;;) {
		DEFINE_WAIT(wait);

		spin_lock_bh(&csdata->lock);
		if (!list_empty(&csdata->chardev_queue)) {
			data = cs_pop_entry(&csdata->chardev_queue);
		} else if (!list_empty(&csdata->dataind_queue)) {
			data = cs_pop_entry(&csdata->dataind_queue);
			csdata->dataind_pending--;
		} else {
			/* 0 is never a valid message, so it marks "empty" */
			data = 0;
		}
		spin_unlock_bh(&csdata->lock);

		if (data)
			break;
		if (file->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			goto out;
		} else if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			goto out;
		}
		prepare_to_wait_exclusive(&csdata->wait, &wait,
						TASK_INTERRUPTIBLE);
		schedule();
		finish_wait(&csdata->wait, &wait);
	}

	retval = put_user(data, (u32 __user *)buf);
	if (!retval)
		retval = sizeof(data);

out:
	return retval;
}
1175
1176 static ssize_t cs_char_write(struct file *file, const char __user *buf,
1177 size_t count, loff_t *unused)
1178 {
1179 struct cs_char *csdata = file->private_data;
1180 u32 data;
1181 int err;
1182 ssize_t retval;
1183
1184 if (count < sizeof(data))
1185 return -EINVAL;
1186
1187 if (get_user(data, (u32 __user *)buf))
1188 retval = -EFAULT;
1189 else
1190 retval = count;
1191
1192 err = cs_hsi_command(csdata->hi, data);
1193 if (err < 0)
1194 retval = err;
1195
1196 return retval;
1197 }
1198
/*
 * ioctl(): query state/interface version, toggle the wake line, or
 * (re)configure the shared buffers.  Unknown commands get -ENOTTY.
 */
static long cs_char_ioctl(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	struct cs_char *csdata = file->private_data;
	int r = 0;

	switch (cmd) {
	case CS_GET_STATE: {
		unsigned int state;

		state = cs_hsi_get_state(csdata->hi);
		if (copy_to_user((void __user *)arg, &state, sizeof(state)))
			r = -EFAULT;

		break;
	}
	case CS_SET_WAKELINE: {
		unsigned int state;

		if (copy_from_user(&state, (void __user *)arg, sizeof(state))) {
			r = -EFAULT;
			break;
		}

		/* only 0 (down) and 1 (up) are meaningful */
		if (state > 1) {
			r = -EINVAL;
			break;
		}

		cs_hsi_set_wakeline(csdata->hi, !!state);

		break;
	}
	case CS_GET_IF_VERSION: {
		unsigned int ifver = CS_IF_VERSION;

		if (copy_to_user((void __user *)arg, &ifver, sizeof(ifver)))
			r = -EFAULT;

		break;
	}
	case CS_CONFIG_BUFS: {
		struct cs_buffer_config buf_cfg;

		if (copy_from_user(&buf_cfg, (void __user *)arg,
							sizeof(buf_cfg)))
			r = -EFAULT;
		else
			r = cs_hsi_buf_config(csdata->hi, &buf_cfg);

		break;
	}
	default:
		r = -ENOTTY;
		break;
	}

	return r;
}
1258
1259 static int cs_char_mmap(struct file *file, struct vm_area_struct *vma)
1260 {
1261 if (vma->vm_end < vma->vm_start)
1262 return -EINVAL;
1263
1264 if (vma_pages(vma) != 1)
1265 return -EINVAL;
1266
1267 vma->vm_flags |= VM_IO | VM_DONTDUMP | VM_DONTEXPAND;
1268 vma->vm_ops = &cs_char_vm_ops;
1269 vma->vm_private_data = file->private_data;
1270
1271 return 0;
1272 }
1273
/*
 * Open the character device.
 *
 * Only one concurrent open is allowed: the 'opened' flag is tested and
 * set under the lock and a second opener gets -EBUSY.  On success a
 * zeroed page is allocated as the mmap() backing store and the HSI
 * interface is started; failures unwind via the goto ladder.
 */
static int cs_char_open(struct inode *unused, struct file *file)
{
	int ret = 0;
	unsigned long p;

	/* Enforce exclusive access to the single shared cs_char_data. */
	spin_lock_bh(&cs_char_data.lock);
	if (cs_char_data.opened) {
		ret = -EBUSY;
		spin_unlock_bh(&cs_char_data.lock);
		goto out1;
	}
	cs_char_data.opened = 1;
	cs_char_data.dataind_pending = 0;
	spin_unlock_bh(&cs_char_data.lock);

	/* Backing page for the userspace mmap() of the device. */
	p = get_zeroed_page(GFP_KERNEL);
	if (!p) {
		ret = -ENOMEM;
		goto out2;
	}

	ret = cs_hsi_start(&cs_char_data.hi, cs_char_data.cl, p, CS_MMAP_SIZE);
	if (ret) {
		dev_err(&cs_char_data.cl->device, "Unable to initialize HSI\n");
		goto out3;
	}

	/* Publish the mapping info only after the HSI start succeeded. */
	cs_char_data.mmap_base = p;
	cs_char_data.mmap_size = CS_MMAP_SIZE;

	file->private_data = &cs_char_data;

	return 0;

out3:
	free_page(p);
out2:
	spin_lock_bh(&cs_char_data.lock);
	cs_char_data.opened = 0;
	spin_unlock_bh(&cs_char_data.lock);
out1:
	return ret;
}
1318
1319 static void cs_free_char_queue(struct list_head *head)
1320 {
1321 struct char_queue *entry;
1322 struct list_head *cursor, *next;
1323
1324 if (!list_empty(head)) {
1325 list_for_each_safe(cursor, next, head) {
1326 entry = list_entry(cursor, struct char_queue, list);
1327 list_del(&entry->list);
1328 kfree(entry);
1329 }
1330 }
1331
1332 }
1333
/*
 * Last close of the character device.
 *
 * Stops the HSI interface, then — under the lock — detaches it, frees
 * the mmap backing page, drains both message queues and clears
 * 'opened' so the device can be opened again.
 */
static int cs_char_release(struct inode *unused, struct file *file)
{
	struct cs_char *csdata = file->private_data;

	/* Stop the interface before tearing down the shared state. */
	cs_hsi_stop(csdata->hi);
	spin_lock_bh(&csdata->lock);
	csdata->hi = NULL;
	free_page(csdata->mmap_base);
	cs_free_char_queue(&csdata->chardev_queue);
	cs_free_char_queue(&csdata->dataind_queue);
	csdata->opened = 0;
	spin_unlock_bh(&csdata->lock);

	return 0;
}
1349
/* Userspace entry points for the cmt_speech character device. */
static const struct file_operations cs_char_fops = {
	.owner		= THIS_MODULE,
	.read		= cs_char_read,
	.write		= cs_char_write,
	.poll		= cs_char_poll,
	.unlocked_ioctl	= cs_char_ioctl,
	.mmap		= cs_char_mmap,
	.open		= cs_char_open,
	.release	= cs_char_release,
	.fasync		= cs_char_fasync,
};
1361
/* /dev/cmt_speech, registered with a dynamically assigned minor. */
static struct miscdevice cs_char_miscdev = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "cmt_speech",
	.fops	= &cs_char_fops
};
1367
1368 static int cs_hsi_client_probe(struct device *dev)
1369 {
1370 int err = 0;
1371 struct hsi_client *cl = to_hsi_client(dev);
1372
1373 dev_dbg(dev, "hsi_client_probe\n");
1374 init_waitqueue_head(&cs_char_data.wait);
1375 spin_lock_init(&cs_char_data.lock);
1376 cs_char_data.opened = 0;
1377 cs_char_data.cl = cl;
1378 cs_char_data.hi = NULL;
1379 INIT_LIST_HEAD(&cs_char_data.chardev_queue);
1380 INIT_LIST_HEAD(&cs_char_data.dataind_queue);
1381
1382 cs_char_data.channel_id_cmd = hsi_get_channel_id_by_name(cl,
1383 "speech-control");
1384 if (cs_char_data.channel_id_cmd < 0) {
1385 err = cs_char_data.channel_id_cmd;
1386 dev_err(dev, "Could not get cmd channel (%d)\n", err);
1387 return err;
1388 }
1389
1390 cs_char_data.channel_id_data = hsi_get_channel_id_by_name(cl,
1391 "speech-data");
1392 if (cs_char_data.channel_id_data < 0) {
1393 err = cs_char_data.channel_id_data;
1394 dev_err(dev, "Could not get data channel (%d)\n", err);
1395 return err;
1396 }
1397
1398 err = misc_register(&cs_char_miscdev);
1399 if (err)
1400 dev_err(dev, "Failed to register: %d\n", err);
1401
1402 return err;
1403 }
1404
/*
 * Unbind from the HSI client.
 *
 * Deregisters the misc device first, then takes ownership of any
 * active HSI interface under the lock (clearing cs_char_data.hi) and
 * stops it outside the lock.
 */
static int cs_hsi_client_remove(struct device *dev)
{
	struct cs_hsi_iface *hi;

	dev_dbg(dev, "hsi_client_remove\n");
	misc_deregister(&cs_char_miscdev);
	spin_lock_bh(&cs_char_data.lock);
	hi = cs_char_data.hi;
	cs_char_data.hi = NULL;
	spin_unlock_bh(&cs_char_data.lock);
	if (hi)
		cs_hsi_stop(hi);

	return 0;
}
1420
/* HSI client driver glue; matches clients named "cmt-speech". */
static struct hsi_client_driver cs_hsi_driver = {
	.driver = {
		.name	= "cmt-speech",
		.owner	= THIS_MODULE,
		.probe	= cs_hsi_client_probe,
		.remove	= cs_hsi_client_remove,
	},
};
1429
1430 static int __init cs_char_init(void)
1431 {
1432 pr_info("CMT speech driver added\n");
1433 return hsi_register_client_driver(&cs_hsi_driver);
1434 }
1435 module_init(cs_char_init);
1436
/* Module exit point: unregister the HSI client driver. */
static void __exit cs_char_exit(void)
{
	hsi_unregister_client_driver(&cs_hsi_driver);
	pr_info("CMT speech driver removed\n");
}
module_exit(cs_char_exit);
1443
1444 MODULE_ALIAS("hsi:cmt-speech");
1445 MODULE_AUTHOR("Kai Vehmanen <kai.vehmanen@nokia.com>");
1446 MODULE_AUTHOR("Peter Ujfalusi <peter.ujfalusi@nokia.com>");
1447 MODULE_DESCRIPTION("CMT speech driver");
1448 MODULE_LICENSE("GPL v2");