// SPDX-License-Identifier: GPL-2.0-only
/*
 * This contains the functions to handle the descriptors for DesignWare
 * databook 4.xx.
 *
 * Copyright (C) 2015  STMicroelectronics Ltd
 *
 * Author: Alexandre Torgue <alexandre.torgue@st.com>
 */

#include <linux/stmmac.h>
#include "common.h"
#include "dwmac4.h"
#include "dwmac4_descs.h"

static int dwmac4_wrback_get_tx_status(void *data, struct stmmac_extra_stats *x,
				       struct dma_desc *p,
				       void __iomem *ioaddr)
{
	struct net_device_stats *stats = (struct net_device_stats *)data;
	unsigned int tdes3;
	int ret = tx_done;

	tdes3 = le32_to_cpu(p->des3);

	/* Get tx owner first */
	if (unlikely(tdes3 & TDES3_OWN))
		return tx_dma_own;

	/* Verify tx error by looking at the last segment. */
	if (likely(!(tdes3 & TDES3_LAST_DESCRIPTOR)))
		return tx_not_ls;

	if (unlikely(tdes3 & TDES3_ERROR_SUMMARY)) {
		ret = tx_err;

		if (unlikely(tdes3 & TDES3_JABBER_TIMEOUT))
			x->tx_jabber++;
		if (unlikely(tdes3 & TDES3_PACKET_FLUSHED))
			x->tx_frame_flushed++;
		if (unlikely(tdes3 & TDES3_LOSS_CARRIER)) {
			x->tx_losscarrier++;
			stats->tx_carrier_errors++;
		}
		if (unlikely(tdes3 & TDES3_NO_CARRIER)) {
			x->tx_carrier++;
			stats->tx_carrier_errors++;
		}
		if (unlikely((tdes3 & TDES3_LATE_COLLISION) ||
			     (tdes3 & TDES3_EXCESSIVE_COLLISION)))
			stats->collisions +=
				(tdes3 & TDES3_COLLISION_COUNT_MASK)
				>> TDES3_COLLISION_COUNT_SHIFT;

		if (unlikely(tdes3 & TDES3_EXCESSIVE_DEFERRAL))
			x->tx_deferred++;

		if (unlikely(tdes3 & TDES3_UNDERFLOW_ERROR)) {
			x->tx_underflow++;
			ret |= tx_err_bump_tc;
		}

		if (unlikely(tdes3 & TDES3_IP_HDR_ERROR))
			x->tx_ip_header_error++;

		if (unlikely(tdes3 & TDES3_PAYLOAD_ERROR))
			x->tx_payload_error++;
	}

	if (unlikely(tdes3 & TDES3_DEFERRED))
		x->tx_deferred++;

	return ret;
}

static int dwmac4_wrback_get_rx_status(void *data, struct stmmac_extra_stats *x,
				       struct dma_desc *p)
{
	struct net_device_stats *stats = (struct net_device_stats *)data;
	unsigned int rdes1 = le32_to_cpu(p->des1);
	unsigned int rdes2 = le32_to_cpu(p->des2);
	unsigned int rdes3 = le32_to_cpu(p->des3);
	int message_type;
	int ret = good_frame;

	if (unlikely(rdes3 & RDES3_OWN))
		return dma_own;

	if (unlikely(rdes3 & RDES3_CONTEXT_DESCRIPTOR))
		return discard_frame;
	if (likely(!(rdes3 & RDES3_LAST_DESCRIPTOR)))
		return rx_not_ls;

	if (unlikely(rdes3 & RDES3_ERROR_SUMMARY)) {
		if (unlikely(rdes3 & RDES3_GIANT_PACKET))
			stats->rx_length_errors++;
		if (unlikely(rdes3 & RDES3_OVERFLOW_ERROR))
			x->rx_gmac_overflow++;

		if (unlikely(rdes3 & RDES3_RECEIVE_WATCHDOG))
			x->rx_watchdog++;

		if (unlikely(rdes3 & RDES3_RECEIVE_ERROR))
			x->rx_mii++;

		if (unlikely(rdes3 & RDES3_CRC_ERROR)) {
			x->rx_crc_errors++;
			stats->rx_crc_errors++;
		}

		if (unlikely(rdes3 & RDES3_DRIBBLE_ERROR))
			x->dribbling_bit++;

		ret = discard_frame;
	}

	message_type = (rdes1 & ERDES4_MSG_TYPE_MASK) >> 8;

	if (rdes1 & RDES1_IP_HDR_ERROR)
		x->ip_hdr_err++;
	if (rdes1 & RDES1_IP_CSUM_BYPASSED)
		x->ip_csum_bypassed++;
	if (rdes1 & RDES1_IPV4_HEADER)
		x->ipv4_pkt_rcvd++;
	if (rdes1 & RDES1_IPV6_HEADER)
		x->ipv6_pkt_rcvd++;

	if (message_type == RDES_EXT_NO_PTP)
		x->no_ptp_rx_msg_type_ext++;
	else if (message_type == RDES_EXT_SYNC)
		x->ptp_rx_msg_type_sync++;
	else if (message_type == RDES_EXT_FOLLOW_UP)
		x->ptp_rx_msg_type_follow_up++;
	else if (message_type == RDES_EXT_DELAY_REQ)
		x->ptp_rx_msg_type_delay_req++;
	else if (message_type == RDES_EXT_DELAY_RESP)
		x->ptp_rx_msg_type_delay_resp++;
	else if (message_type == RDES_EXT_PDELAY_REQ)
		x->ptp_rx_msg_type_pdelay_req++;
	else if (message_type == RDES_EXT_PDELAY_RESP)
		x->ptp_rx_msg_type_pdelay_resp++;
	else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP)
		x->ptp_rx_msg_type_pdelay_follow_up++;
	else if (message_type == RDES_PTP_ANNOUNCE)
		x->ptp_rx_msg_type_announce++;
	else if (message_type == RDES_PTP_MANAGEMENT)
		x->ptp_rx_msg_type_management++;
	else if (message_type == RDES_PTP_PKT_RESERVED_TYPE)
		x->ptp_rx_msg_pkt_reserved_type++;

	if (rdes1 & RDES1_PTP_PACKET_TYPE)
		x->ptp_frame_type++;
	if (rdes1 & RDES1_PTP_VER)
		x->ptp_ver++;
	if (rdes1 & RDES1_TIMESTAMP_DROPPED)
		x->timestamp_dropped++;

	if (unlikely(rdes2 & RDES2_SA_FILTER_FAIL)) {
		x->sa_rx_filter_fail++;
		ret = discard_frame;
	}
	if (unlikely(rdes2 & RDES2_DA_FILTER_FAIL)) {
		x->da_rx_filter_fail++;
		ret = discard_frame;
	}

	if (rdes2 & RDES2_L3_FILTER_MATCH)
		x->l3_filter_match++;
	if (rdes2 & RDES2_L4_FILTER_MATCH)
		x->l4_filter_match++;
	if ((rdes2 & RDES2_L3_L4_FILT_NB_MATCH_MASK)
	    >> RDES2_L3_L4_FILT_NB_MATCH_SHIFT)
		x->l3_l4_filter_no_match++;

	return ret;
}

static int dwmac4_rd_get_tx_len(struct dma_desc *p)
{
	return (le32_to_cpu(p->des2) & TDES2_BUFFER1_SIZE_MASK);
}

static int dwmac4_get_tx_owner(struct dma_desc *p)
{
	return (le32_to_cpu(p->des3) & TDES3_OWN) >> TDES3_OWN_SHIFT;
}

static void dwmac4_set_tx_owner(struct dma_desc *p)
{
	p->des3 |= cpu_to_le32(TDES3_OWN);
}

static void dwmac4_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
{
	p->des3 |= cpu_to_le32(RDES3_OWN | RDES3_BUFFER1_VALID_ADDR);

	if (!disable_rx_ic)
		p->des3 |= cpu_to_le32(RDES3_INT_ON_COMPLETION_EN);
}

static int dwmac4_get_tx_ls(struct dma_desc *p)
{
	return (le32_to_cpu(p->des3) & TDES3_LAST_DESCRIPTOR)
		>> TDES3_LAST_DESCRIPTOR_SHIFT;
}

static int dwmac4_wrback_get_rx_frame_len(struct dma_desc *p, int rx_coe)
{
	return (le32_to_cpu(p->des3) & RDES3_PACKET_SIZE_MASK);
}

static void dwmac4_rd_enable_tx_timestamp(struct dma_desc *p)
{
	p->des2 |= cpu_to_le32(TDES2_TIMESTAMP_ENABLE);
}

static int dwmac4_wrback_get_tx_timestamp_status(struct dma_desc *p)
{
	/* Context type from W/B descriptor must be zero */
	if (le32_to_cpu(p->des3) & TDES3_CONTEXT_TYPE)
		return 0;

	/* Tx Timestamp Status is 1 so des0 and des1 hold valid values */
	if (le32_to_cpu(p->des3) & TDES3_TIMESTAMP_STATUS)
		return 1;

	return 0;
}

static inline void dwmac4_get_timestamp(void *desc, u32 ats, u64 *ts)
{
	struct dma_desc *p = (struct dma_desc *)desc;
	u64 ns;

	ns = le32_to_cpu(p->des0);
	/* convert high/sec time stamp value to nanosecond */
	ns += le32_to_cpu(p->des1) * 1000000000ULL;

	*ts = ns;
}

static int dwmac4_rx_check_timestamp(void *desc)
{
	struct dma_desc *p = (struct dma_desc *)desc;
	unsigned int rdes0 = le32_to_cpu(p->des0);
	unsigned int rdes1 = le32_to_cpu(p->des1);
	unsigned int rdes3 = le32_to_cpu(p->des3);
	u32 own, ctxt;
	int ret = 1;

	own = rdes3 & RDES3_OWN;
	ctxt = ((rdes3 & RDES3_CONTEXT_DESCRIPTOR)
		>> RDES3_CONTEXT_DESCRIPTOR_SHIFT);

	if (likely(!own && ctxt)) {
		if ((rdes0 == 0xffffffff) && (rdes1 == 0xffffffff))
			/* Corrupted value */
			ret = -EINVAL;
		else
			/* A valid Timestamp is ready to be read */
			ret = 0;
	}

	/* Timestamp not ready */
	return ret;
}

static int dwmac4_wrback_get_rx_timestamp_status(void *desc, void *next_desc,
						 u32 ats)
{
	struct dma_desc *p = (struct dma_desc *)desc;
	int ret = -EINVAL;

	/* Get the status from normal w/b descriptor */
	if (likely(le32_to_cpu(p->des3) & RDES3_RDES1_VALID)) {
		if (likely(le32_to_cpu(p->des1) & RDES1_TIMESTAMP_AVAILABLE)) {
			int i = 0;

			/* Check if timestamp is OK from context descriptor */
			do {
				ret = dwmac4_rx_check_timestamp(next_desc);
				if (ret < 0)
					goto exit;
				i++;

			} while ((ret == 1) && (i < 10));

			if (i == 10)
				ret = -EBUSY;
		}
	}
exit:
	if (likely(ret == 0))
		return 1;

	return 0;
}

static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
				   int mode, int end, int bfsize)
{
	dwmac4_set_rx_owner(p, disable_rx_ic);
}

static void dwmac4_rd_init_tx_desc(struct dma_desc *p, int mode, int end)
{
	p->des0 = 0;
	p->des1 = 0;
	p->des2 = 0;
	p->des3 = 0;
}

static void dwmac4_rd_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
				      bool csum_flag, int mode, bool tx_own,
				      bool ls, unsigned int tot_pkt_len)
{
	unsigned int tdes3 = le32_to_cpu(p->des3);

	p->des2 |= cpu_to_le32(len & TDES2_BUFFER1_SIZE_MASK);

	tdes3 |= tot_pkt_len & TDES3_PACKET_SIZE_MASK;
	if (is_fs)
		tdes3 |= TDES3_FIRST_DESCRIPTOR;
	else
		tdes3 &= ~TDES3_FIRST_DESCRIPTOR;

	if (likely(csum_flag))
		tdes3 |= (TX_CIC_FULL << TDES3_CHECKSUM_INSERTION_SHIFT);
	else
		tdes3 &= ~(TX_CIC_FULL << TDES3_CHECKSUM_INSERTION_SHIFT);

	if (ls)
		tdes3 |= TDES3_LAST_DESCRIPTOR;
	else
		tdes3 &= ~TDES3_LAST_DESCRIPTOR;

	/* Finally set the OWN bit. Later the DMA will start! */
	if (tx_own)
		tdes3 |= TDES3_OWN;

	if (is_fs && tx_own)
		/* When the own bit, for the first frame, has to be set, all
		 * descriptors for the same frame have to be set before, to
		 * avoid a race condition.
		 */
		dma_wmb();

	p->des3 = cpu_to_le32(tdes3);
}

static void dwmac4_rd_prepare_tso_tx_desc(struct dma_desc *p, int is_fs,
					  int len1, int len2, bool tx_own,
					  bool ls, unsigned int tcphdrlen,
					  unsigned int tcppayloadlen)
{
	unsigned int tdes3 = le32_to_cpu(p->des3);

	if (len1)
		p->des2 |= cpu_to_le32((len1 & TDES2_BUFFER1_SIZE_MASK));

	if (len2)
		p->des2 |= cpu_to_le32((len2 << TDES2_BUFFER2_SIZE_MASK_SHIFT)
			    & TDES2_BUFFER2_SIZE_MASK);

	if (is_fs) {
		tdes3 |= TDES3_FIRST_DESCRIPTOR |
			 TDES3_TCP_SEGMENTATION_ENABLE |
			 ((tcphdrlen << TDES3_HDR_LEN_SHIFT) &
			  TDES3_SLOT_NUMBER_MASK) |
			 ((tcppayloadlen & TDES3_TCP_PKT_PAYLOAD_MASK));
	} else {
		tdes3 &= ~TDES3_FIRST_DESCRIPTOR;
	}

	if (ls)
		tdes3 |= TDES3_LAST_DESCRIPTOR;
	else
		tdes3 &= ~TDES3_LAST_DESCRIPTOR;

	/* Finally set the OWN bit. Later the DMA will start! */
	if (tx_own)
		tdes3 |= TDES3_OWN;

	if (is_fs && tx_own)
		/* When the own bit, for the first frame, has to be set, all
		 * descriptors for the same frame have to be set before, to
		 * avoid a race condition.
		 */
		dma_wmb();

	p->des3 = cpu_to_le32(tdes3);
}

static void dwmac4_release_tx_desc(struct dma_desc *p, int mode)
{
	p->des0 = 0;
	p->des1 = 0;
	p->des2 = 0;
	p->des3 = 0;
}

static void dwmac4_rd_set_tx_ic(struct dma_desc *p)
{
	p->des2 |= cpu_to_le32(TDES2_INTERRUPT_ON_COMPLETION);
}

static void dwmac4_display_ring(void *head, unsigned int size, bool rx,
				dma_addr_t dma_rx_phy, unsigned int desc_size)
{
	dma_addr_t dma_addr;
	int i;

	pr_info("%s descriptor ring:\n", rx ? "RX" : "TX");

	if (desc_size == sizeof(struct dma_desc)) {
		struct dma_desc *p = (struct dma_desc *)head;

		for (i = 0; i < size; i++) {
			dma_addr = dma_rx_phy + i * sizeof(*p);
			pr_info("%03d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
				i, &dma_addr,
				le32_to_cpu(p->des0), le32_to_cpu(p->des1),
				le32_to_cpu(p->des2), le32_to_cpu(p->des3));
			p++;
		}
	} else if (desc_size == sizeof(struct dma_extended_desc)) {
		struct dma_extended_desc *extp = (struct dma_extended_desc *)head;

		for (i = 0; i < size; i++) {
			dma_addr = dma_rx_phy + i * sizeof(*extp);
			pr_info("%03d [%pad]: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
				i, &dma_addr,
				le32_to_cpu(extp->basic.des0), le32_to_cpu(extp->basic.des1),
				le32_to_cpu(extp->basic.des2), le32_to_cpu(extp->basic.des3),
				le32_to_cpu(extp->des4), le32_to_cpu(extp->des5),
				le32_to_cpu(extp->des6), le32_to_cpu(extp->des7));
			extp++;
		}
	} else if (desc_size == sizeof(struct dma_edesc)) {
		struct dma_edesc *ep = (struct dma_edesc *)head;

		for (i = 0; i < size; i++) {
			dma_addr = dma_rx_phy + i * sizeof(*ep);
			pr_info("%03d [%pad]: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
				i, &dma_addr,
				le32_to_cpu(ep->des4), le32_to_cpu(ep->des5),
				le32_to_cpu(ep->des6), le32_to_cpu(ep->des7),
				le32_to_cpu(ep->basic.des0), le32_to_cpu(ep->basic.des1),
				le32_to_cpu(ep->basic.des2), le32_to_cpu(ep->basic.des3));
			ep++;
		}
	} else {
		pr_err("unsupported descriptor!");
	}
}

static void dwmac4_set_mss_ctxt(struct dma_desc *p, unsigned int mss)
{
	p->des0 = 0;
	p->des1 = 0;
	p->des2 = cpu_to_le32(mss);
	p->des3 = cpu_to_le32(TDES3_CONTEXT_TYPE | TDES3_CTXT_TCMSSV);
}

static void dwmac4_set_addr(struct dma_desc *p, dma_addr_t addr)
{
	p->des0 = cpu_to_le32(lower_32_bits(addr));
	p->des1 = cpu_to_le32(upper_32_bits(addr));
}

static void dwmac4_clear(struct dma_desc *p)
{
	p->des0 = 0;
	p->des1 = 0;
	p->des2 = 0;
	p->des3 = 0;
}

static void dwmac4_set_sarc(struct dma_desc *p, u32 sarc_type)
{
	sarc_type <<= TDES3_SA_INSERT_CTRL_SHIFT;

	p->des3 |= cpu_to_le32(sarc_type & TDES3_SA_INSERT_CTRL_MASK);
}

static int set_16kib_bfsize(int mtu)
{
	int ret = 0;

	if (unlikely(mtu >= BUF_SIZE_8KiB))
		ret = BUF_SIZE_16KiB;
	return ret;
}

static void dwmac4_set_vlan_tag(struct dma_desc *p, u16 tag, u16 inner_tag,
				u32 inner_type)
{
	p->des0 = 0;
	p->des1 = 0;
	p->des2 = 0;
	p->des3 = 0;

	/* Inner VLAN */
	if (inner_type) {
		u32 des = inner_tag << TDES2_IVT_SHIFT;

		des &= TDES2_IVT_MASK;
		p->des2 = cpu_to_le32(des);

		des = inner_type << TDES3_IVTIR_SHIFT;
		des &= TDES3_IVTIR_MASK;
		p->des3 = cpu_to_le32(des | TDES3_IVLTV);
	}

	/* Outer VLAN */
	p->des3 |= cpu_to_le32(tag & TDES3_VLAN_TAG);
	p->des3 |= cpu_to_le32(TDES3_VLTV);

	p->des3 |= cpu_to_le32(TDES3_CONTEXT_TYPE);
}

static void dwmac4_set_vlan(struct dma_desc *p, u32 type)
{
	type <<= TDES2_VLAN_TAG_SHIFT;
	p->des2 |= cpu_to_le32(type & TDES2_VLAN_TAG_MASK);
}

static void dwmac4_get_rx_header_len(struct dma_desc *p, unsigned int *len)
{
	*len = le32_to_cpu(p->des2) & RDES2_HL;
}

static void dwmac4_set_sec_addr(struct dma_desc *p, dma_addr_t addr, bool buf2_valid)
{
	p->des2 = cpu_to_le32(lower_32_bits(addr));
	p->des3 = cpu_to_le32(upper_32_bits(addr));

	if (buf2_valid)
		p->des3 |= cpu_to_le32(RDES3_BUFFER2_VALID_ADDR);
	else
		p->des3 &= cpu_to_le32(~RDES3_BUFFER2_VALID_ADDR);
}

static void dwmac4_set_tbs(struct dma_edesc *p, u32 sec, u32 nsec)
{
	p->des4 = cpu_to_le32((sec & TDES4_LT) | TDES4_LTV);
	p->des5 = cpu_to_le32(nsec & TDES5_LT);
	p->des6 = 0;
	p->des7 = 0;
}

const struct stmmac_desc_ops dwmac4_desc_ops = {
	.tx_status = dwmac4_wrback_get_tx_status,
	.rx_status = dwmac4_wrback_get_rx_status,
	.get_tx_len = dwmac4_rd_get_tx_len,
	.get_tx_owner = dwmac4_get_tx_owner,
	.set_tx_owner = dwmac4_set_tx_owner,
	.set_rx_owner = dwmac4_set_rx_owner,
	.get_tx_ls = dwmac4_get_tx_ls,
	.get_rx_frame_len = dwmac4_wrback_get_rx_frame_len,
	.enable_tx_timestamp = dwmac4_rd_enable_tx_timestamp,
	.get_tx_timestamp_status = dwmac4_wrback_get_tx_timestamp_status,
	.get_rx_timestamp_status = dwmac4_wrback_get_rx_timestamp_status,
	.get_timestamp = dwmac4_get_timestamp,
	.set_tx_ic = dwmac4_rd_set_tx_ic,
	.prepare_tx_desc = dwmac4_rd_prepare_tx_desc,
	.prepare_tso_tx_desc = dwmac4_rd_prepare_tso_tx_desc,
	.release_tx_desc = dwmac4_release_tx_desc,
	.init_rx_desc = dwmac4_rd_init_rx_desc,
	.init_tx_desc = dwmac4_rd_init_tx_desc,
	.display_ring = dwmac4_display_ring,
	.set_mss = dwmac4_set_mss_ctxt,
	.set_addr = dwmac4_set_addr,
	.clear = dwmac4_clear,
	.set_sarc = dwmac4_set_sarc,
	.set_vlan_tag = dwmac4_set_vlan_tag,
	.set_vlan = dwmac4_set_vlan,
	.get_rx_header_len = dwmac4_get_rx_header_len,
	.set_sec_addr = dwmac4_set_sec_addr,
	.set_tbs = dwmac4_set_tbs,
};

const struct stmmac_mode_ops dwmac4_ring_mode_ops = {
	.set_16kib_bfsize = set_16kib_bfsize,
};