0001
0002
0003
0004
0005
0006
0007
0008
0009 #include <linux/bitops.h>
0010 #include <linux/kernel.h>
0011 #include <linux/module.h>
0012 #include <linux/slab.h>
0013 #include <linux/greybus.h>
0014 #include <linux/spi/spi.h>
0015
0016 #include "spilib.h"
0017
/*
 * Per-connection state for a Greybus-backed SPI master.
 *
 * first_xfer/last_xfer and the offsets below track progress through a
 * struct spi_message that is too large for a single Greybus operation
 * and must be split across several requests (see
 * gb_spi_operation_create() / setup_next_xfer()).
 */
struct gb_spilib {
	struct gb_connection *connection;	/* transport to the remote SPI master */
	struct device *parent;			/* device used for logging and ops callbacks */
	struct spi_transfer *first_xfer;	/* first transfer covered by current operation */
	struct spi_transfer *last_xfer;		/* last transfer covered by current operation */
	struct spilib_ops *ops;			/* optional host (un)prepare hooks */
	u32 rx_xfer_offset;	/* bytes of the split transfer's rx_buf already received */
	u32 tx_xfer_offset;	/* bytes of the split transfer's tx_buf already sent */
	u32 last_xfer_size;	/* bytes of last_xfer carried by the current operation */
	unsigned int op_timeout;	/* timeout (ms) for the current operation */
	u16 mode;		/* SPI mode bits reported by the remote master */
	u16 flags;		/* SPI flags reported by the remote master */
	u32 bits_per_word_mask;
	u8 num_chipselect;
	u32 min_speed_hz;
	u32 max_speed_hz;
};
0035
/*
 * Message-processing states, stored as opaque pointer values in
 * spi_message->state.  MSG_ERROR is -1 so it cannot collide with the
 * enumerated non-error states.
 */
#define GB_SPI_STATE_MSG_DONE ((void *)0)
#define GB_SPI_STATE_MSG_IDLE ((void *)1)
#define GB_SPI_STATE_MSG_RUNNING ((void *)2)
#define GB_SPI_STATE_OP_READY ((void *)3)
#define GB_SPI_STATE_OP_DONE ((void *)4)
#define GB_SPI_STATE_MSG_ERROR ((void *)-1)

/* NOTE(review): not referenced anywhere in this file — confirm before removing */
#define XFER_TIMEOUT_TOLERANCE 200
0044
0045 static struct spi_master *get_master_from_spi(struct gb_spilib *spi)
0046 {
0047 return gb_connection_get_data(spi->connection);
0048 }
0049
0050 static int tx_header_fit_operation(u32 tx_size, u32 count, size_t data_max)
0051 {
0052 size_t headers_size;
0053
0054 data_max -= sizeof(struct gb_spi_transfer_request);
0055 headers_size = (count + 1) * sizeof(struct gb_spi_transfer);
0056
0057 return tx_size + headers_size > data_max ? 0 : 1;
0058 }
0059
0060 static size_t calc_rx_xfer_size(u32 rx_size, u32 *tx_xfer_size, u32 len,
0061 size_t data_max)
0062 {
0063 size_t rx_xfer_size;
0064
0065 data_max -= sizeof(struct gb_spi_transfer_response);
0066
0067 if (rx_size + len > data_max)
0068 rx_xfer_size = data_max - rx_size;
0069 else
0070 rx_xfer_size = len;
0071
0072
0073 if (*tx_xfer_size && rx_xfer_size > *tx_xfer_size)
0074 rx_xfer_size = *tx_xfer_size;
0075 if (*tx_xfer_size && rx_xfer_size < *tx_xfer_size)
0076 *tx_xfer_size = rx_xfer_size;
0077
0078 return rx_xfer_size;
0079 }
0080
0081 static size_t calc_tx_xfer_size(u32 tx_size, u32 count, size_t len,
0082 size_t data_max)
0083 {
0084 size_t headers_size;
0085
0086 data_max -= sizeof(struct gb_spi_transfer_request);
0087 headers_size = (count + 1) * sizeof(struct gb_spi_transfer);
0088
0089 if (tx_size + headers_size + len > data_max)
0090 return data_max - (tx_size + sizeof(struct gb_spi_transfer));
0091
0092 return len;
0093 }
0094
0095 static void clean_xfer_state(struct gb_spilib *spi)
0096 {
0097 spi->first_xfer = NULL;
0098 spi->last_xfer = NULL;
0099 spi->rx_xfer_offset = 0;
0100 spi->tx_xfer_offset = 0;
0101 spi->last_xfer_size = 0;
0102 spi->op_timeout = 0;
0103 }
0104
0105 static bool is_last_xfer_done(struct gb_spilib *spi)
0106 {
0107 struct spi_transfer *last_xfer = spi->last_xfer;
0108
0109 if ((spi->tx_xfer_offset + spi->last_xfer_size == last_xfer->len) ||
0110 (spi->rx_xfer_offset + spi->last_xfer_size == last_xfer->len))
0111 return true;
0112
0113 return false;
0114 }
0115
/*
 * Prepare the next operation window after one completes: either advance
 * to the next transfer in the message, keep splitting the current
 * transfer, or mark the whole message done.  Does nothing unless the
 * previous operation finished (GB_SPI_STATE_OP_DONE).  Always returns 0.
 */
static int setup_next_xfer(struct gb_spilib *spi, struct spi_message *msg)
{
	struct spi_transfer *last_xfer = spi->last_xfer;

	if (msg->state != GB_SPI_STATE_OP_DONE)
		return 0;

	/*
	 * If we transferred all content of the last transfer, reset the
	 * offsets and check whether it was the final transfer in the
	 * message; otherwise start the next window at the next transfer.
	 */
	if (is_last_xfer_done(spi)) {
		spi->tx_xfer_offset = 0;
		spi->rx_xfer_offset = 0;
		spi->op_timeout = 0;
		if (last_xfer == list_last_entry(&msg->transfers,
						 struct spi_transfer,
						 transfer_list))
			msg->state = GB_SPI_STATE_MSG_DONE;
		else
			spi->first_xfer = list_next_entry(last_xfer,
							  transfer_list);
		return 0;
	}

	/* last transfer was only partially handled: keep splitting it */
	spi->first_xfer = last_xfer;
	if (last_xfer->tx_buf)
		spi->tx_xfer_offset += spi->last_xfer_size;

	if (last_xfer->rx_buf)
		spi->rx_xfer_offset += spi->last_xfer_size;

	return 0;
}
0150
0151 static struct spi_transfer *get_next_xfer(struct spi_transfer *xfer,
0152 struct spi_message *msg)
0153 {
0154 if (xfer == list_last_entry(&msg->transfers, struct spi_transfer,
0155 transfer_list))
0156 return NULL;
0157
0158 return list_next_entry(xfer, transfer_list);
0159 }
0160
0161
/*
 * Build a single Greybus SPI transfer operation covering as many of the
 * message's transfers — or as large a chunk of one oversized transfer —
 * as fit in one request/response payload.
 *
 * First pass sizes the window (count, tx_size, rx_size) and records it
 * in spi->last_xfer/last_xfer_size; second pass fills in the request.
 * On success returns the operation with msg->state set to
 * GB_SPI_STATE_OP_DONE.  Returns NULL on failure, additionally setting
 * GB_SPI_STATE_MSG_ERROR when a transfer has no buffers at all.
 */
static struct gb_operation *gb_spi_operation_create(struct gb_spilib *spi,
		struct gb_connection *connection, struct spi_message *msg)
{
	struct gb_spi_transfer_request *request;
	struct spi_device *dev = msg->spi;
	struct spi_transfer *xfer;
	struct gb_spi_transfer *gb_xfer;
	struct gb_operation *operation;
	u32 tx_size = 0, rx_size = 0, count = 0, xfer_len = 0, request_size;
	u32 tx_xfer_size = 0, rx_xfer_size = 0, len;
	u32 total_len = 0;
	unsigned int xfer_timeout;
	size_t data_max;
	void *tx_data;

	data_max = gb_operation_get_payload_size_max(connection);
	xfer = spi->first_xfer;

	/* Find number of transfers queued and the tx/rx length in the message */

	while (msg->state != GB_SPI_STATE_OP_READY) {
		msg->state = GB_SPI_STATE_MSG_RUNNING;
		spi->last_xfer = xfer;

		if (!xfer->tx_buf && !xfer->rx_buf) {
			dev_err(spi->parent,
				"bufferless transfer, length %u\n", xfer->len);
			msg->state = GB_SPI_STATE_MSG_ERROR;
			return NULL;
		}

		tx_xfer_size = 0;
		rx_xfer_size = 0;

		if (xfer->tx_buf) {
			len = xfer->len - spi->tx_xfer_offset;
			/* stop the window before headers overflow the request */
			if (!tx_header_fit_operation(tx_size, count, data_max))
				break;
			tx_xfer_size = calc_tx_xfer_size(tx_size, count,
							 len, data_max);
			spi->last_xfer_size = tx_xfer_size;
		}

		if (xfer->rx_buf) {
			len = xfer->len - spi->rx_xfer_offset;
			rx_xfer_size = calc_rx_xfer_size(rx_size, &tx_xfer_size,
							 len, data_max);
			spi->last_xfer_size = rx_xfer_size;
		}

		tx_size += tx_xfer_size;
		rx_size += rx_xfer_size;

		total_len += spi->last_xfer_size;
		count++;

		xfer = get_next_xfer(xfer, msg);
		if (!xfer || total_len >= data_max)
			msg->state = GB_SPI_STATE_OP_READY;
	}

	/*
	 * Setup the operation: request holds the fixed header, one
	 * gb_spi_transfer descriptor per transfer in the window, and the
	 * concatenated tx data; the response carries the rx data.
	 */
	request_size = sizeof(*request);
	request_size += count * sizeof(*gb_xfer);
	request_size += tx_size;

	/* Response consists only of the concatenated rx data */
	operation = gb_operation_create(connection, GB_SPI_TYPE_TRANSFER,
					request_size, rx_size, GFP_KERNEL);
	if (!operation)
		return NULL;

	request = operation->request->payload;
	request->count = cpu_to_le16(count);
	request->mode = dev->mode;
	request->chip_select = dev->chip_select;

	gb_xfer = &request->transfers[0];
	tx_data = gb_xfer + count;	/* tx data follows the descriptor array */

	/* Fill in the transfers array */
	xfer = spi->first_xfer;
	while (msg->state != GB_SPI_STATE_OP_DONE) {
		int xfer_delay;

		if (xfer == spi->last_xfer)
			xfer_len = spi->last_xfer_size;
		else
			xfer_len = xfer->len;

		/* make sure we do not timeout in a slow transfer */
		/* NOTE(review): divides by xfer->speed_hz — assumes the SPI
		 * core always populated a nonzero speed; confirm */
		xfer_timeout = xfer_len * 8 * MSEC_PER_SEC / xfer->speed_hz;
		xfer_timeout += GB_OPERATION_TIMEOUT_DEFAULT;

		if (xfer_timeout > spi->op_timeout)
			spi->op_timeout = xfer_timeout;

		gb_xfer->speed_hz = cpu_to_le32(xfer->speed_hz);
		gb_xfer->len = cpu_to_le32(xfer_len);
		xfer_delay = spi_delay_to_ns(&xfer->delay, xfer) / 1000;
		xfer_delay = clamp_t(u16, xfer_delay, 0, U16_MAX);
		gb_xfer->delay_usecs = cpu_to_le16(xfer_delay);
		gb_xfer->cs_change = xfer->cs_change;
		gb_xfer->bits_per_word = xfer->bits_per_word;

		/* Copy tx data */
		if (xfer->tx_buf) {
			gb_xfer->xfer_flags |= GB_SPI_XFER_WRITE;
			memcpy(tx_data, xfer->tx_buf + spi->tx_xfer_offset,
			       xfer_len);
			tx_data += xfer_len;
		}

		if (xfer->rx_buf)
			gb_xfer->xfer_flags |= GB_SPI_XFER_READ;

		if (xfer == spi->last_xfer) {
			/* flag a split transfer so the remote keeps CS asserted */
			if (!is_last_xfer_done(spi))
				gb_xfer->xfer_flags |= GB_SPI_XFER_INPROGRESS;
			msg->state = GB_SPI_STATE_OP_DONE;
			continue;
		}

		gb_xfer++;
		xfer = get_next_xfer(xfer, msg);
	}

	msg->actual_length += total_len;

	return operation;
}
0296
/*
 * Copy the rx payload of a completed transfer operation back into the
 * rx buffers of the message's transfers, walking the window from
 * first_xfer through last_xfer.
 */
static void gb_spi_decode_response(struct gb_spilib *spi,
				   struct spi_message *msg,
				   struct gb_spi_transfer_response *response)
{
	struct spi_transfer *xfer = spi->first_xfer;
	void *rx_data = response->data;
	u32 xfer_len;

	while (xfer) {
		/* Copy rx data */
		if (xfer->rx_buf) {
			/*
			 * The first and last transfers of the window may be
			 * partial chunks; transfers in between are whole.
			 */
			if (xfer == spi->first_xfer)
				xfer_len = xfer->len - spi->rx_xfer_offset;
			else if (xfer == spi->last_xfer)
				xfer_len = spi->last_xfer_size;
			else
				xfer_len = xfer->len;

			/*
			 * NOTE(review): rx_xfer_offset is added for every
			 * transfer here; it appears to be nonzero only when
			 * a single transfer is being split (first == last) —
			 * confirm middle transfers are unaffected.
			 */
			memcpy(xfer->rx_buf + spi->rx_xfer_offset, rx_data,
			       xfer_len);
			rx_data += xfer_len;
		}

		if (xfer == spi->last_xfer)
			break;

		xfer = list_next_entry(xfer, transfer_list);
	}
}
0326
/*
 * spi_master->transfer_one_message callback: execute @msg over the
 * Greybus connection, splitting it into as many transfer operations as
 * needed.  Stores the result in msg->status, resets the split-transfer
 * state, finalizes the message, and returns 0 or a negative errno.
 */
static int gb_spi_transfer_one_message(struct spi_master *master,
				       struct spi_message *msg)
{
	struct gb_spilib *spi = spi_master_get_devdata(master);
	struct gb_connection *connection = spi->connection;
	struct gb_spi_transfer_response *response;
	struct gb_operation *operation;
	int ret = 0;

	spi->first_xfer = list_first_entry_or_null(&msg->transfers,
						   struct spi_transfer,
						   transfer_list);
	if (!spi->first_xfer) {
		ret = -ENOMEM;
		goto out;
	}

	msg->state = GB_SPI_STATE_MSG_IDLE;

	/* issue one operation per window until the message completes or fails */
	while (msg->state != GB_SPI_STATE_MSG_DONE &&
	       msg->state != GB_SPI_STATE_MSG_ERROR) {
		operation = gb_spi_operation_create(spi, connection, msg);
		if (!operation) {
			msg->state = GB_SPI_STATE_MSG_ERROR;
			ret = -EINVAL;
			continue;
		}

		/* op_timeout was sized by gb_spi_operation_create() */
		ret = gb_operation_request_send_sync_timeout(operation,
							     spi->op_timeout);
		if (!ret) {
			response = operation->response->payload;
			if (response)
				gb_spi_decode_response(spi, msg, response);
		} else {
			dev_err(spi->parent,
				"transfer operation failed: %d\n", ret);
			msg->state = GB_SPI_STATE_MSG_ERROR;
		}

		gb_operation_put(operation);
		setup_next_xfer(spi, msg);
	}

out:
	msg->status = ret;
	clean_xfer_state(spi);
	spi_finalize_current_message(master);

	return ret;
}
0378
0379 static int gb_spi_prepare_transfer_hardware(struct spi_master *master)
0380 {
0381 struct gb_spilib *spi = spi_master_get_devdata(master);
0382
0383 return spi->ops->prepare_transfer_hardware(spi->parent);
0384 }
0385
0386 static int gb_spi_unprepare_transfer_hardware(struct spi_master *master)
0387 {
0388 struct gb_spilib *spi = spi_master_get_devdata(master);
0389
0390 spi->ops->unprepare_transfer_hardware(spi->parent);
0391
0392 return 0;
0393 }
0394
/* spi_master->setup hook: no per-device host-side setup is needed. */
static int gb_spi_setup(struct spi_device *spi)
{
	return 0;
}
0400
/* spi_master->cleanup hook: nothing to release per device. */
static void gb_spi_cleanup(struct spi_device *spi)
{
}
0405
0406
0407
0408
0409
0410
0411
/*
 * Map Greybus SPI mode/flag bits to the Linux SPI core's bits.
 * Identity mappings — presumably the protocol definitions mirror the
 * SPI core bit values; confirm against the greybus protocol header.
 */
#define gb_spi_mode_map(mode) mode
#define gb_spi_flags_map(flags) flags
0414
0415 static int gb_spi_get_master_config(struct gb_spilib *spi)
0416 {
0417 struct gb_spi_master_config_response response;
0418 u16 mode, flags;
0419 int ret;
0420
0421 ret = gb_operation_sync(spi->connection, GB_SPI_TYPE_MASTER_CONFIG,
0422 NULL, 0, &response, sizeof(response));
0423 if (ret < 0)
0424 return ret;
0425
0426 mode = le16_to_cpu(response.mode);
0427 spi->mode = gb_spi_mode_map(mode);
0428
0429 flags = le16_to_cpu(response.flags);
0430 spi->flags = gb_spi_flags_map(flags);
0431
0432 spi->bits_per_word_mask = le32_to_cpu(response.bits_per_word_mask);
0433 spi->num_chipselect = response.num_chipselect;
0434
0435 spi->min_speed_hz = le32_to_cpu(response.min_speed_hz);
0436 spi->max_speed_hz = le32_to_cpu(response.max_speed_hz);
0437
0438 return 0;
0439 }
0440
0441 static int gb_spi_setup_device(struct gb_spilib *spi, u8 cs)
0442 {
0443 struct spi_master *master = get_master_from_spi(spi);
0444 struct gb_spi_device_config_request request;
0445 struct gb_spi_device_config_response response;
0446 struct spi_board_info spi_board = { {0} };
0447 struct spi_device *spidev;
0448 int ret;
0449 u8 dev_type;
0450
0451 request.chip_select = cs;
0452
0453 ret = gb_operation_sync(spi->connection, GB_SPI_TYPE_DEVICE_CONFIG,
0454 &request, sizeof(request),
0455 &response, sizeof(response));
0456 if (ret < 0)
0457 return ret;
0458
0459 dev_type = response.device_type;
0460
0461 if (dev_type == GB_SPI_SPI_DEV)
0462 strscpy(spi_board.modalias, "spidev",
0463 sizeof(spi_board.modalias));
0464 else if (dev_type == GB_SPI_SPI_NOR)
0465 strscpy(spi_board.modalias, "spi-nor",
0466 sizeof(spi_board.modalias));
0467 else if (dev_type == GB_SPI_SPI_MODALIAS)
0468 memcpy(spi_board.modalias, response.name,
0469 sizeof(spi_board.modalias));
0470 else
0471 return -EINVAL;
0472
0473 spi_board.mode = le16_to_cpu(response.mode);
0474 spi_board.bus_num = master->bus_num;
0475 spi_board.chip_select = cs;
0476 spi_board.max_speed_hz = le32_to_cpu(response.max_speed_hz);
0477
0478 spidev = spi_new_device(master, &spi_board);
0479 if (!spidev)
0480 return -EINVAL;
0481
0482 return 0;
0483 }
0484
/*
 * Allocate and register an SPI master backed by @connection, query the
 * remote master configuration, and instantiate one spi_device per
 * reported chip select.
 *
 * @connection: Greybus connection carrying the SPI protocol
 * @dev: parent device for the SPI master
 * @ops: optional host hooks for (un)preparing transfer hardware
 *
 * Returns 0 on success or a negative errno.  Before registration,
 * failure drops the master reference (spi_master_put); after
 * registration, failure unregisters the master instead.
 */
int gb_spilib_master_init(struct gb_connection *connection, struct device *dev,
			  struct spilib_ops *ops)
{
	struct gb_spilib *spi;
	struct spi_master *master;
	int ret;
	u8 i;

	/* devdata of the master is our gb_spilib state */
	master = spi_alloc_master(dev, sizeof(*spi));
	if (!master) {
		dev_err(dev, "cannot alloc SPI master\n");
		return -ENOMEM;
	}

	spi = spi_master_get_devdata(master);
	spi->connection = connection;
	gb_connection_set_data(connection, master);
	spi->parent = dev;
	spi->ops = ops;

	/* get master configuration from remote side */
	ret = gb_spi_get_master_config(spi);
	if (ret)
		goto exit_spi_put;

	master->bus_num = -1; /* dynamically assigned bus number */
	master->num_chipselect = spi->num_chipselect;
	master->mode_bits = spi->mode;
	master->flags = spi->flags;
	master->bits_per_word_mask = spi->bits_per_word_mask;

	/* all transfers go through the Greybus operation path */
	master->cleanup = gb_spi_cleanup;
	master->setup = gb_spi_setup;
	master->transfer_one_message = gb_spi_transfer_one_message;

	if (ops && ops->prepare_transfer_hardware) {
		master->prepare_transfer_hardware =
			gb_spi_prepare_transfer_hardware;
	}

	if (ops && ops->unprepare_transfer_hardware) {
		master->unprepare_transfer_hardware =
			gb_spi_unprepare_transfer_hardware;
	}

	master->auto_runtime_pm = true;

	ret = spi_register_master(master);
	if (ret < 0)
		goto exit_spi_put;

	/* create a spi_device for each chip select reported by the remote */
	for (i = 0; i < spi->num_chipselect; i++) {
		ret = gb_spi_setup_device(spi, i);
		if (ret < 0) {
			dev_err(dev, "failed to allocate spi device %d: %d\n",
				i, ret);
			goto exit_spi_unregister;
		}
	}

	return 0;

exit_spi_put:
	spi_master_put(master);

	return ret;

exit_spi_unregister:
	spi_unregister_master(master);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_spilib_master_init);
0561
/* Tear down the SPI master previously set up on @connection. */
void gb_spilib_master_exit(struct gb_connection *connection)
{
	struct spi_master *master;

	master = gb_connection_get_data(connection);
	spi_unregister_master(master);
}
EXPORT_SYMBOL_GPL(gb_spilib_master_exit);
0569
0570 MODULE_LICENSE("GPL v2");