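/*
 *	X.25 Packet Layer - link (neighbour) handling.
 *
 *	Handles restart and diagnostic frames received on LCI 0, drives the
 *	T20 restart timer, and maintains the per-device neighbour list.
 */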
#define pr_fmt(fmt) "X25: " fmt

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/uaccess.h>
#include <linux/init.h>
#include <net/x25.h>

LIST_HEAD(x25_neigh_list);
DEFINE_RWLOCK(x25_neigh_list_lock);

static void x25_t20timer_expiry(struct timer_list *);

static void x25_transmit_restart_confirmation(struct x25_neigh *nb);
static void x25_transmit_restart_request(struct x25_neigh *nb);

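/*
 *	T20 restart timer handling.
 */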
static inline void x25_start_t20timer(struct x25_neigh *nb)
{
	mod_timer(&nb->t20timer, jiffies + nb->t20);
}

static void x25_t20timer_expiry(struct timer_list *t)
{
	struct x25_neigh *nb = from_timer(nb, t, t20timer);

	x25_transmit_restart_request(nb);

	x25_start_t20timer(nb);
}

static inline void x25_stop_t20timer(struct x25_neigh *nb)
{
	del_timer(&nb->t20timer);
}

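/*
 *	This handles all restart and diagnostic frames.
 */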
void x25_link_control(struct sk_buff *skb, struct x25_neigh *nb,
		      unsigned short frametype)
{
	struct sk_buff *skbn;

	switch (frametype) {
	case X25_RESTART_REQUEST:
		switch (nb->state) {
		case X25_LINK_STATE_0:
			/* Layer 2 is already up: this can happen when the
			 * X.25 module is loaded after the link has connected.
			 */
			nb->state = X25_LINK_STATE_3;
			x25_transmit_restart_confirmation(nb);
			break;
		case X25_LINK_STATE_2:
			x25_stop_t20timer(nb);
			nb->state = X25_LINK_STATE_3;
			break;
		case X25_LINK_STATE_3:
			/* clear existing virtual calls */
			x25_kill_by_neigh(nb);

			x25_transmit_restart_confirmation(nb);
			break;
		}
		break;

	case X25_RESTART_CONFIRMATION:
		switch (nb->state) {
		case X25_LINK_STATE_2:
			x25_stop_t20timer(nb);
			nb->state = X25_LINK_STATE_3;
			break;
		case X25_LINK_STATE_3:
			/* clear existing virtual calls */
			x25_kill_by_neigh(nb);

			x25_transmit_restart_request(nb);
			nb->state = X25_LINK_STATE_2;
			x25_start_t20timer(nb);
			break;
		}
		break;

	case X25_DIAGNOSTIC:
		if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 4))
			break;

		pr_warn("diagnostic #%d - %02X %02X %02X\n",
			skb->data[3], skb->data[4],
			skb->data[5], skb->data[6]);
		break;

	default:
		pr_warn("received unknown %02X with LCI 000\n",
			frametype);
		break;
	}

	if (nb->state == X25_LINK_STATE_3)
		while ((skbn = skb_dequeue(&nb->queue)) != NULL)
			x25_send_frame(skbn, nb);
}

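/*
 *	This routine is called when a Restart Request is needed.
 */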
static void x25_transmit_restart_request(struct x25_neigh *nb)
{
	unsigned char *dptr;
	int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN + 2;
	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

	if (!skb)
		return;

	skb_reserve(skb, X25_MAX_L2_LEN);

	dptr = skb_put(skb, X25_STD_MIN_LEN + 2);

	*dptr++ = nb->extended ? X25_GFI_EXTSEQ : X25_GFI_STDSEQ;
	*dptr++ = 0x00;
	*dptr++ = X25_RESTART_REQUEST;
	*dptr++ = 0x00;
	*dptr++ = 0;

	skb->sk = NULL;

	x25_send_frame(skb, nb);
}

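/*
 *	This routine is called when a Restart Confirmation is needed.
 */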
static void x25_transmit_restart_confirmation(struct x25_neigh *nb)
{
	unsigned char *dptr;
	int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN;
	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

	if (!skb)
		return;

	skb_reserve(skb, X25_MAX_L2_LEN);

	dptr = skb_put(skb, X25_STD_MIN_LEN);

	*dptr++ = nb->extended ? X25_GFI_EXTSEQ : X25_GFI_STDSEQ;
	*dptr++ = 0x00;
	*dptr++ = X25_RESTART_CONFIRMATION;

	skb->sk = NULL;

	x25_send_frame(skb, nb);
}

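/*
 *	This routine is called when a Clear Request is needed outside of
 *	the context of a connected socket.
 */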
void x25_transmit_clear_request(struct x25_neigh *nb, unsigned int lci,
				unsigned char cause)
{
	unsigned char *dptr;
	int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN + 2;
	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

	if (!skb)
		return;

	skb_reserve(skb, X25_MAX_L2_LEN);

	dptr = skb_put(skb, X25_STD_MIN_LEN + 2);

	*dptr++ = ((lci >> 8) & 0x0F) | (nb->extended ?
					 X25_GFI_EXTSEQ :
					 X25_GFI_STDSEQ);
	*dptr++ = (lci >> 0) & 0xFF;
	*dptr++ = X25_CLEAR_REQUEST;
	*dptr++ = cause;
	*dptr++ = 0x00;

	skb->sk = NULL;

	x25_send_frame(skb, nb);
}

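/*
 *	Queue a frame until the link is up, establishing it if necessary,
 *	or send it straight away once the link is in state 3.
 */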
void x25_transmit_link(struct sk_buff *skb, struct x25_neigh *nb)
{
	switch (nb->state) {
	case X25_LINK_STATE_0:
		skb_queue_tail(&nb->queue, skb);
		nb->state = X25_LINK_STATE_1;
		x25_establish_link(nb);
		break;
	case X25_LINK_STATE_1:
	case X25_LINK_STATE_2:
		skb_queue_tail(&nb->queue, skb);
		break;
	case X25_LINK_STATE_3:
		x25_send_frame(skb, nb);
		break;
	}
}

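/*
 *	Called when the link layer has become established.
 */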
void x25_link_established(struct x25_neigh *nb)
{
	switch (nb->state) {
	case X25_LINK_STATE_0:
	case X25_LINK_STATE_1:
		x25_transmit_restart_request(nb);
		nb->state = X25_LINK_STATE_2;
		x25_start_t20timer(nb);
		break;
	}
}

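/*
 *	Called when the link layer has terminated, or an establishment
 *	request has failed.
 */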
void x25_link_terminated(struct x25_neigh *nb)
{
	nb->state = X25_LINK_STATE_0;
	skb_queue_purge(&nb->queue);
	x25_stop_t20timer(nb);

	/* clear existing virtual calls */
	x25_kill_by_neigh(nb);
}

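/*
 *	Add a new device.
 */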
void x25_link_device_up(struct net_device *dev)
{
	struct x25_neigh *nb = kmalloc(sizeof(*nb), GFP_ATOMIC);

	if (!nb)
		return;

	skb_queue_head_init(&nb->queue);
	timer_setup(&nb->t20timer, x25_t20timer_expiry, 0);

	dev_hold(dev);
	nb->dev = dev;
	nb->state = X25_LINK_STATE_0;
	nb->extended = 0;
	/*
	 * Facilities we allow to be negotiated on this neighbour.
	 */
	nb->global_facil_mask = X25_MASK_REVERSE |
				X25_MASK_THROUGHPUT |
				X25_MASK_PACKET_SIZE |
				X25_MASK_WINDOW_SIZE;
	nb->t20 = sysctl_x25_restart_request_timeout;
	refcount_set(&nb->refcnt, 1);

	write_lock_bh(&x25_neigh_list_lock);
	list_add(&nb->node, &x25_neigh_list);
	write_unlock_bh(&x25_neigh_list_lock);
}

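/**
 *	__x25_remove_neigh - remove neighbour from x25_neigh_list
 *	@nb: neighbour to remove
 *
 *	Remove neighbour from x25_neigh_list, if it was there.
 *	Caller must hold x25_neigh_list_lock.
 */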
static void __x25_remove_neigh(struct x25_neigh *nb)
{
	if (nb->node.next) {
		list_del(&nb->node);
		x25_neigh_put(nb);
	}
}

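/*
 *	A device has been removed, remove its links.
 */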
void x25_link_device_down(struct net_device *dev)
{
	struct x25_neigh *nb;
	struct list_head *entry, *tmp;

	write_lock_bh(&x25_neigh_list_lock);

	list_for_each_safe(entry, tmp, &x25_neigh_list) {
		nb = list_entry(entry, struct x25_neigh, node);

		if (nb->dev == dev) {
			__x25_remove_neigh(nb);
			dev_put(dev);
		}
	}

	write_unlock_bh(&x25_neigh_list_lock);
}

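/*
 *	Given a device, return the neighbour address.
 */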
struct x25_neigh *x25_get_neigh(struct net_device *dev)
{
	struct x25_neigh *nb, *use = NULL;

	read_lock_bh(&x25_neigh_list_lock);
	list_for_each_entry(nb, &x25_neigh_list, node) {
		if (nb->dev == dev) {
			use = nb;
			break;
		}
	}

	if (use)
		x25_neigh_hold(use);
	read_unlock_bh(&x25_neigh_list_lock);
	return use;
}

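/*
 *	Handle the ioctls that control the subscription functions.
 */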
int x25_subscr_ioctl(unsigned int cmd, void __user *arg)
{
	struct x25_subscrip_struct x25_subscr;
	struct x25_neigh *nb;
	struct net_device *dev;
	int rc = -EINVAL;

	if (cmd != SIOCX25GSUBSCRIP && cmd != SIOCX25SSUBSCRIP)
		goto out;

	rc = -EFAULT;
	if (copy_from_user(&x25_subscr, arg, sizeof(x25_subscr)))
		goto out;

	rc = -EINVAL;
	if ((dev = x25_dev_get(x25_subscr.device)) == NULL)
		goto out;

	if ((nb = x25_get_neigh(dev)) == NULL)
		goto out_dev_put;

	dev_put(dev);

	if (cmd == SIOCX25GSUBSCRIP) {
		read_lock_bh(&x25_neigh_list_lock);
		x25_subscr.extended = nb->extended;
		x25_subscr.global_facil_mask = nb->global_facil_mask;
		read_unlock_bh(&x25_neigh_list_lock);
		rc = copy_to_user(arg, &x25_subscr,
				  sizeof(x25_subscr)) ? -EFAULT : 0;
	} else {
		rc = -EINVAL;
		if (!(x25_subscr.extended && x25_subscr.extended != 1)) {
			rc = 0;
			write_lock_bh(&x25_neigh_list_lock);
			nb->extended = x25_subscr.extended;
			nb->global_facil_mask = x25_subscr.global_facil_mask;
			write_unlock_bh(&x25_neigh_list_lock);
		}
	}
	x25_neigh_put(nb);
out:
	return rc;
out_dev_put:
	dev_put(dev);
	goto out;
}

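/*
 *	Release all memory associated with X.25 neighbour structures.
 */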
void __exit x25_link_free(void)
{
	struct x25_neigh *nb;
	struct list_head *entry, *tmp;

	write_lock_bh(&x25_neigh_list_lock);

	list_for_each_safe(entry, tmp, &x25_neigh_list) {
		struct net_device *dev;

		nb = list_entry(entry, struct x25_neigh, node);
		dev = nb->dev;
		__x25_remove_neigh(nb);
		dev_put(dev);
	}
	write_unlock_bh(&x25_neigh_list_lock);
}