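/*
 * NET/ROM output routines: fragmentation of outgoing data, transmit
 * window handling and frame transmission.
 */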
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <net/netrom.h>
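
/*
 * Queue a frame for transmission. Frames whose payload exceeds
 * NR_MAX_PACKET_SIZE are fragmented into several transport frames, each
 * carrying a copy of the transport header; every fragment except the
 * last has NR_MORE_FLAG set.
 */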
void nr_output(struct sock *sk, struct sk_buff *skb)
{
        struct sk_buff *skbn;
        unsigned char transport[NR_TRANSPORT_LEN];
        int err, frontlen, len;

        if (skb->len - NR_TRANSPORT_LEN > NR_MAX_PACKET_SIZE) {
                /* Save a copy of the transport header */
                skb_copy_from_linear_data(skb, transport, NR_TRANSPORT_LEN);
                skb_pull(skb, NR_TRANSPORT_LEN);

                frontlen = skb_headroom(skb);

                while (skb->len > 0) {
                        if ((skbn = sock_alloc_send_skb(sk, frontlen + NR_MAX_PACKET_SIZE, 0, &err)) == NULL)
                                return;

                        skb_reserve(skbn, frontlen);

                        len = (NR_MAX_PACKET_SIZE > skb->len) ? skb->len : NR_MAX_PACKET_SIZE;

                        /* Copy the user data */
                        skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
                        skb_pull(skb, len);

                        /* Duplicate the saved transport header */
                        skb_push(skbn, NR_TRANSPORT_LEN);
                        skb_copy_to_linear_data(skbn, transport,
                                                NR_TRANSPORT_LEN);
                        if (skb->len > 0)
                                skbn->data[4] |= NR_MORE_FLAG;

                        /* Throw the fragment on the queue */
                        skb_queue_tail(&sk->sk_write_queue, skbn);
                }

                kfree_skb(skb);
        } else {
                skb_queue_tail(&sk->sk_write_queue, skb);
        }

        nr_kick(sk);
}
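
/*
 * Fill in the remaining control fields of an I frame (send and receive
 * sequence numbers and, if our receiver is busy, the choke flag), then
 * transmit it.
 */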
static void nr_send_iframe(struct sock *sk, struct sk_buff *skb)
{
        struct nr_sock *nr = nr_sk(sk);

        if (skb == NULL)
                return;

        skb->data[2] = nr->vs;
        skb->data[3] = nr->vr;

        if (nr->condition & NR_COND_OWN_RX_BUSY)
                skb->data[4] |= NR_CHOKE_FLAG;

        nr_start_idletimer(sk);

        nr_transmit_buffer(sk, skb);
}
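
/*
 * Retransmit the oldest unacknowledged I frame (the head of the ack
 * queue), stamped with the current sequence numbers.
 */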
void nr_send_nak_frame(struct sock *sk)
{
        struct sk_buff *skb, *skbn;
        struct nr_sock *nr = nr_sk(sk);

        if ((skb = skb_peek(&nr->ack_queue)) == NULL)
                return;

        if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL)
                return;

        skbn->data[2] = nr->va;
        skbn->data[3] = nr->vr;

        if (nr->condition & NR_COND_OWN_RX_BUSY)
                skbn->data[4] |= NR_CHOKE_FLAG;

        nr_transmit_buffer(sk, skbn);

        nr->condition &= ~NR_COND_ACK_PENDING;
        nr->vl = nr->vr;

        nr_stop_t1timer(sk);
}
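
/*
 * Send as many queued frames as the transmit window allows: clone each
 * frame off the write queue, transmit the clone and keep the original
 * on the ack queue until it is acknowledged.
 */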
void nr_kick(struct sock *sk)
{
        struct nr_sock *nr = nr_sk(sk);
        struct sk_buff *skb, *skbn;
        unsigned short start, end;

        if (nr->state != NR_STATE_3)
                return;

        if (nr->condition & NR_COND_PEER_RX_BUSY)
                return;

        if (!skb_peek(&sk->sk_write_queue))
                return;

        start = (skb_peek(&nr->ack_queue) == NULL) ? nr->va : nr->vs;
        end = (nr->va + nr->window) % NR_MODULUS;

        if (start == end)
                return;

        nr->vs = start;

        /*
         * Transmit data until either we're out of data to send or
         * the window is full.
         */

        /*
         * Dequeue the frame and copy it.
         */
        skb = skb_dequeue(&sk->sk_write_queue);

        do {
                if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
                        skb_queue_head(&sk->sk_write_queue, skb);
                        break;
                }

                skb_set_owner_w(skbn, sk);

                /*
                 * Transmit the frame copy.
                 */
                nr_send_iframe(sk, skbn);

                nr->vs = (nr->vs + 1) % NR_MODULUS;

                /*
                 * Requeue the original data frame.
                 */
                skb_queue_tail(&nr->ack_queue, skb);

        } while (nr->vs != end &&
                 (skb = skb_dequeue(&sk->sk_write_queue)) != NULL);

        nr->vl = nr->vr;
        nr->condition &= ~NR_COND_ACK_PENDING;

        if (!nr_t1timer_running(sk))
                nr_start_t1timer(sk);
}
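
/*
 * Prepend the NET/ROM network header (source and destination AX.25
 * addresses plus the initial time-to-live) and hand the frame to the
 * routing code; drop it and disconnect if no route is found.
 */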
void nr_transmit_buffer(struct sock *sk, struct sk_buff *skb)
{
        struct nr_sock *nr = nr_sk(sk);
        unsigned char *dptr;

        /*
         * Add the network header.
         */
        dptr = skb_push(skb, NR_NETWORK_LEN);

        memcpy(dptr, &nr->source_addr, AX25_ADDR_LEN);
        dptr[6] &= ~AX25_CBIT;
        dptr[6] &= ~AX25_EBIT;
        dptr[6] |= AX25_SSSID_SPARE;
        dptr += AX25_ADDR_LEN;

        memcpy(dptr, &nr->dest_addr, AX25_ADDR_LEN);
        dptr[6] &= ~AX25_CBIT;
        dptr[6] |= AX25_EBIT;
        dptr[6] |= AX25_SSSID_SPARE;
        dptr += AX25_ADDR_LEN;

        *dptr++ = sysctl_netrom_network_ttl_initialiser;

        if (!nr_route_frame(skb, NULL)) {
                kfree_skb(skb);
                nr_disconnect(sk, ENETUNREACH);
        }
}
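
/*
 * Start connection establishment: reset the error conditions and the
 * retry counter, send a connect request and (re)start the T1 timer.
 */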
void nr_establish_data_link(struct sock *sk)
{
        struct nr_sock *nr = nr_sk(sk);

        nr->condition = 0x00;
        nr->n2count = 0;

        nr_write_internal(sk, NR_CONNREQ);

        nr_stop_t2timer(sk);
        nr_stop_t4timer(sk);
        nr_stop_idletimer(sk);
        nr_start_t1timer(sk);
}
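
/*
 * Answer an enquiry with an information acknowledgement. If our receiver
 * is busy the choke flag is set and no NAK is ever sent; otherwise a NAK
 * flag is added when the resequencing queue holds out-of-order frames.
 */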
void nr_enquiry_response(struct sock *sk)
{
        struct nr_sock *nr = nr_sk(sk);
        int frametype = NR_INFOACK;

        if (nr->condition & NR_COND_OWN_RX_BUSY) {
                frametype |= NR_CHOKE_FLAG;
        } else {
                if (skb_peek(&nr->reseq_queue) != NULL)
                        frametype |= NR_NAK_FLAG;
        }

        nr_write_internal(sk, frametype);

        nr->vl = nr->vr;
        nr->condition &= ~NR_COND_ACK_PENDING;
}
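
/*
 * Process an incoming acknowledgement: release the frames it covers and
 * either stop T1 (everything outstanding has been acked) or restart it
 * (something is still unacknowledged).
 */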
void nr_check_iframes_acked(struct sock *sk, unsigned short nr)
{
        struct nr_sock *nrom = nr_sk(sk);

        if (nrom->vs == nr) {
                nr_frames_acked(sk, nr);
                nr_stop_t1timer(sk);
                nrom->n2count = 0;
        } else {
                if (nrom->va != nr) {
                        nr_frames_acked(sk, nr);
                        nr_start_t1timer(sk);
                }
        }
}