0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021 #define pr_fmt(fmt) "X25: " fmt
0022
0023 #include <linux/slab.h>
0024 #include <linux/errno.h>
0025 #include <linux/kernel.h>
0026 #include <linux/string.h>
0027 #include <linux/skbuff.h>
0028 #include <net/sock.h>
0029 #include <net/tcp_states.h>
0030 #include <net/x25.h>
0031
0032 static int x25_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more)
0033 {
0034 struct sk_buff *skbo, *skbn = skb;
0035 struct x25_sock *x25 = x25_sk(sk);
0036
0037 if (more) {
0038 x25->fraglen += skb->len;
0039 skb_queue_tail(&x25->fragment_queue, skb);
0040 skb_set_owner_r(skb, sk);
0041 return 0;
0042 }
0043
0044 if (x25->fraglen > 0) {
0045 int len = x25->fraglen + skb->len;
0046
0047 if ((skbn = alloc_skb(len, GFP_ATOMIC)) == NULL){
0048 kfree_skb(skb);
0049 return 1;
0050 }
0051
0052 skb_queue_tail(&x25->fragment_queue, skb);
0053
0054 skb_reset_transport_header(skbn);
0055
0056 skbo = skb_dequeue(&x25->fragment_queue);
0057 skb_copy_from_linear_data(skbo, skb_put(skbn, skbo->len),
0058 skbo->len);
0059 kfree_skb(skbo);
0060
0061 while ((skbo =
0062 skb_dequeue(&x25->fragment_queue)) != NULL) {
0063 skb_pull(skbo, (x25->neighbour->extended) ?
0064 X25_EXT_MIN_LEN : X25_STD_MIN_LEN);
0065 skb_copy_from_linear_data(skbo,
0066 skb_put(skbn, skbo->len),
0067 skbo->len);
0068 kfree_skb(skbo);
0069 }
0070
0071 x25->fraglen = 0;
0072 }
0073
0074 skb_set_owner_r(skbn, sk);
0075 skb_queue_tail(&sk->sk_receive_queue, skbn);
0076 if (!sock_flag(sk, SOCK_DEAD))
0077 sk->sk_data_ready(sk);
0078
0079 return 0;
0080 }
0081
0082
0083
0084
0085
0086
0087 static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametype)
0088 {
0089 struct x25_address source_addr, dest_addr;
0090 int len;
0091 struct x25_sock *x25 = x25_sk(sk);
0092
0093 switch (frametype) {
0094 case X25_CALL_ACCEPTED: {
0095
0096 x25_stop_timer(sk);
0097 x25->condition = 0x00;
0098 x25->vs = 0;
0099 x25->va = 0;
0100 x25->vr = 0;
0101 x25->vl = 0;
0102 x25->state = X25_STATE_3;
0103 sk->sk_state = TCP_ESTABLISHED;
0104
0105
0106
0107 if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
0108 goto out_clear;
0109 skb_pull(skb, X25_STD_MIN_LEN);
0110
0111 len = x25_parse_address_block(skb, &source_addr,
0112 &dest_addr);
0113 if (len > 0)
0114 skb_pull(skb, len);
0115 else if (len < 0)
0116 goto out_clear;
0117
0118 len = x25_parse_facilities(skb, &x25->facilities,
0119 &x25->dte_facilities,
0120 &x25->vc_facil_mask);
0121 if (len > 0)
0122 skb_pull(skb, len);
0123 else if (len < 0)
0124 goto out_clear;
0125
0126
0127
0128 if (skb->len > 0) {
0129 if (skb->len > X25_MAX_CUD_LEN)
0130 goto out_clear;
0131
0132 skb_copy_bits(skb, 0, x25->calluserdata.cuddata,
0133 skb->len);
0134 x25->calluserdata.cudlength = skb->len;
0135 }
0136 if (!sock_flag(sk, SOCK_DEAD))
0137 sk->sk_state_change(sk);
0138 break;
0139 }
0140 case X25_CALL_REQUEST:
0141
0142 x25->causediag.cause = 0x01;
0143 x25->causediag.diagnostic = 0x48;
0144
0145 x25_write_internal(sk, X25_CLEAR_REQUEST);
0146 x25_disconnect(sk, EISCONN, 0x01, 0x48);
0147 break;
0148
0149 case X25_CLEAR_REQUEST:
0150 if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
0151 goto out_clear;
0152
0153 x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
0154 x25_disconnect(sk, ECONNREFUSED, skb->data[3], skb->data[4]);
0155 break;
0156
0157 default:
0158 break;
0159 }
0160
0161 return 0;
0162
0163 out_clear:
0164 x25_write_internal(sk, X25_CLEAR_REQUEST);
0165 x25->state = X25_STATE_2;
0166 x25_start_t23timer(sk);
0167 return 0;
0168 }
0169
0170
0171
0172
0173
0174
0175 static int x25_state2_machine(struct sock *sk, struct sk_buff *skb, int frametype)
0176 {
0177 switch (frametype) {
0178
0179 case X25_CLEAR_REQUEST:
0180 if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
0181 goto out_clear;
0182
0183 x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
0184 x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
0185 break;
0186
0187 case X25_CLEAR_CONFIRMATION:
0188 x25_disconnect(sk, 0, 0, 0);
0189 break;
0190
0191 default:
0192 break;
0193 }
0194
0195 return 0;
0196
0197 out_clear:
0198 x25_write_internal(sk, X25_CLEAR_REQUEST);
0199 x25_start_t23timer(sk);
0200 return 0;
0201 }
0202
0203
0204
0205
0206
0207
0208 static int x25_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype, int ns, int nr, int q, int d, int m)
0209 {
0210 int queued = 0;
0211 int modulus;
0212 struct x25_sock *x25 = x25_sk(sk);
0213
0214 modulus = (x25->neighbour->extended) ? X25_EMODULUS : X25_SMODULUS;
0215
0216 switch (frametype) {
0217
0218 case X25_RESET_REQUEST:
0219 x25_write_internal(sk, X25_RESET_CONFIRMATION);
0220 x25_stop_timer(sk);
0221 x25->condition = 0x00;
0222 x25->vs = 0;
0223 x25->vr = 0;
0224 x25->va = 0;
0225 x25->vl = 0;
0226 x25_requeue_frames(sk);
0227 break;
0228
0229 case X25_CLEAR_REQUEST:
0230 if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
0231 goto out_clear;
0232
0233 x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
0234 x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
0235 break;
0236
0237 case X25_RR:
0238 case X25_RNR:
0239 if (!x25_validate_nr(sk, nr)) {
0240 x25_clear_queues(sk);
0241 x25_write_internal(sk, X25_RESET_REQUEST);
0242 x25_start_t22timer(sk);
0243 x25->condition = 0x00;
0244 x25->vs = 0;
0245 x25->vr = 0;
0246 x25->va = 0;
0247 x25->vl = 0;
0248 x25->state = X25_STATE_4;
0249 } else {
0250 x25_frames_acked(sk, nr);
0251 if (frametype == X25_RNR) {
0252 x25->condition |= X25_COND_PEER_RX_BUSY;
0253 } else {
0254 x25->condition &= ~X25_COND_PEER_RX_BUSY;
0255 }
0256 }
0257 break;
0258
0259 case X25_DATA:
0260 x25->condition &= ~X25_COND_PEER_RX_BUSY;
0261 if ((ns != x25->vr) || !x25_validate_nr(sk, nr)) {
0262 x25_clear_queues(sk);
0263 x25_write_internal(sk, X25_RESET_REQUEST);
0264 x25_start_t22timer(sk);
0265 x25->condition = 0x00;
0266 x25->vs = 0;
0267 x25->vr = 0;
0268 x25->va = 0;
0269 x25->vl = 0;
0270 x25->state = X25_STATE_4;
0271 break;
0272 }
0273 x25_frames_acked(sk, nr);
0274 if (ns == x25->vr) {
0275 if (x25_queue_rx_frame(sk, skb, m) == 0) {
0276 x25->vr = (x25->vr + 1) % modulus;
0277 queued = 1;
0278 } else {
0279
0280 x25_clear_queues(sk);
0281 x25_write_internal(sk, X25_RESET_REQUEST);
0282 x25_start_t22timer(sk);
0283 x25->condition = 0x00;
0284 x25->vs = 0;
0285 x25->vr = 0;
0286 x25->va = 0;
0287 x25->vl = 0;
0288 x25->state = X25_STATE_4;
0289 break;
0290 }
0291 if (atomic_read(&sk->sk_rmem_alloc) >
0292 (sk->sk_rcvbuf >> 1))
0293 x25->condition |= X25_COND_OWN_RX_BUSY;
0294 }
0295
0296
0297
0298
0299 if (((x25->vl + x25->facilities.winsize_in) % modulus) == x25->vr) {
0300 x25->condition &= ~X25_COND_ACK_PENDING;
0301 x25_stop_timer(sk);
0302 x25_enquiry_response(sk);
0303 } else {
0304 x25->condition |= X25_COND_ACK_PENDING;
0305 x25_start_t2timer(sk);
0306 }
0307 break;
0308
0309 case X25_INTERRUPT_CONFIRMATION:
0310 clear_bit(X25_INTERRUPT_FLAG, &x25->flags);
0311 break;
0312
0313 case X25_INTERRUPT:
0314 if (sock_flag(sk, SOCK_URGINLINE))
0315 queued = !sock_queue_rcv_skb(sk, skb);
0316 else {
0317 skb_set_owner_r(skb, sk);
0318 skb_queue_tail(&x25->interrupt_in_queue, skb);
0319 queued = 1;
0320 }
0321 sk_send_sigurg(sk);
0322 x25_write_internal(sk, X25_INTERRUPT_CONFIRMATION);
0323 break;
0324
0325 default:
0326 pr_warn("unknown %02X in state 3\n", frametype);
0327 break;
0328 }
0329
0330 return queued;
0331
0332 out_clear:
0333 x25_write_internal(sk, X25_CLEAR_REQUEST);
0334 x25->state = X25_STATE_2;
0335 x25_start_t23timer(sk);
0336 return 0;
0337 }
0338
0339
0340
0341
0342
0343
0344 static int x25_state4_machine(struct sock *sk, struct sk_buff *skb, int frametype)
0345 {
0346 struct x25_sock *x25 = x25_sk(sk);
0347
0348 switch (frametype) {
0349
0350 case X25_RESET_REQUEST:
0351 x25_write_internal(sk, X25_RESET_CONFIRMATION);
0352 fallthrough;
0353 case X25_RESET_CONFIRMATION: {
0354 x25_stop_timer(sk);
0355 x25->condition = 0x00;
0356 x25->va = 0;
0357 x25->vr = 0;
0358 x25->vs = 0;
0359 x25->vl = 0;
0360 x25->state = X25_STATE_3;
0361 x25_requeue_frames(sk);
0362 break;
0363 }
0364 case X25_CLEAR_REQUEST:
0365 if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
0366 goto out_clear;
0367
0368 x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
0369 x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
0370 break;
0371
0372 default:
0373 break;
0374 }
0375
0376 return 0;
0377
0378 out_clear:
0379 x25_write_internal(sk, X25_CLEAR_REQUEST);
0380 x25->state = X25_STATE_2;
0381 x25_start_t23timer(sk);
0382 return 0;
0383 }
0384
0385
0386
0387
0388
0389
0390 static int x25_state5_machine(struct sock *sk, struct sk_buff *skb, int frametype)
0391 {
0392 struct x25_sock *x25 = x25_sk(sk);
0393
0394 switch (frametype) {
0395 case X25_CLEAR_REQUEST:
0396 if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2)) {
0397 x25_write_internal(sk, X25_CLEAR_REQUEST);
0398 x25->state = X25_STATE_2;
0399 x25_start_t23timer(sk);
0400 return 0;
0401 }
0402
0403 x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
0404 x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
0405 break;
0406
0407 default:
0408 break;
0409 }
0410
0411 return 0;
0412 }
0413
0414
0415 int x25_process_rx_frame(struct sock *sk, struct sk_buff *skb)
0416 {
0417 struct x25_sock *x25 = x25_sk(sk);
0418 int queued = 0, frametype, ns, nr, q, d, m;
0419
0420 if (x25->state == X25_STATE_0)
0421 return 0;
0422
0423 frametype = x25_decode(sk, skb, &ns, &nr, &q, &d, &m);
0424
0425 switch (x25->state) {
0426 case X25_STATE_1:
0427 queued = x25_state1_machine(sk, skb, frametype);
0428 break;
0429 case X25_STATE_2:
0430 queued = x25_state2_machine(sk, skb, frametype);
0431 break;
0432 case X25_STATE_3:
0433 queued = x25_state3_machine(sk, skb, frametype, ns, nr, q, d, m);
0434 break;
0435 case X25_STATE_4:
0436 queued = x25_state4_machine(sk, skb, frametype);
0437 break;
0438 case X25_STATE_5:
0439 queued = x25_state5_machine(sk, skb, frametype);
0440 break;
0441 }
0442
0443 x25_kick(sk);
0444
0445 return queued;
0446 }
0447
/*
 * Backlog receive handler: run the state machine and free the skb
 * unless it was queued (ownership transferred) by the handler.
 */
int x25_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (!x25_process_rx_frame(sk, skb))
		kfree_skb(skb);

	return 0;
}