 {
 	unsigned long ticks = (STP_HZ * j)/ HZ;
-	put_unaligned(htons(ticks), (__be16 *)dest);
+	put_unaligned_be16(ticks, dest);
 }
 static inline int br_get_ticks(const unsigned char *src)
 {
-	unsigned long ticks = ntohs(get_unaligned((__be16 *)src));
+	unsigned long ticks = get_unaligned_be16(src);
 	return DIV_ROUND_UP(ticks * HZ, STP_HZ);
 }
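Every hunk in this patch is the same transformation: ntohs(get_unaligned((__be16 *)p)) collapses to get_unaligned_be16(p), ntohl(get_unaligned((__be32 *)p)) to get_unaligned_be32(p), and put_unaligned(htons(v), (__be16 *)p) to put_unaligned_be16(v, p). As a rough userspace sketch of the helpers' semantics (not the kernel's arch-optimized implementation; the _sketch names are mine), byte-wise access makes alignment irrelevant and folds network byte order into the accessor:

#include <stdint.h>

/* Read/write a 16-bit big-endian value at any alignment. */
static uint16_t get_unaligned_be16_sketch(const void *p)
{
	const uint8_t *b = p;

	return (uint16_t)((b[0] << 8) | b[1]);	/* most significant byte first */
}

static void put_unaligned_be16_sketch(uint16_t val, void *p)
{
	uint8_t *b = p;

	b[0] = val >> 8;
	b[1] = val & 0xff;
}

int main(void)
{
	unsigned char buf[3];

	/* buf + 1 is an odd address, i.e. misaligned for a u16; still well-defined. */
	put_unaligned_be16_sketch(0x1234, buf + 1);
	return get_unaligned_be16_sketch(buf + 1) == 0x1234 ? 0 : 1;
}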
 load_w:
 			ptr = load_pointer(skb, k, 4, &tmp);
 			if (ptr != NULL) {
-				A = ntohl(get_unaligned((__be32 *)ptr));
+				A = get_unaligned_be32(ptr);
 				continue;
 			}
 			break;
 load_h:
 			ptr = load_pointer(skb, k, 2, &tmp);
 			if (ptr != NULL) {
-				A = ntohs(get_unaligned((__be16 *)ptr));
+				A = get_unaligned_be16(ptr);
 				continue;
 			}
 			break;
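For these socket-filter loads, the helper matters because k is an arbitrary, filter-chosen packet offset, so the word being fetched carries no alignment guarantee at all. A hedged standalone sketch of the load_w shape, bounds check then big-endian fetch (load_word and the buffer handling are illustrative, not the kernel API):

#include <stdint.h>
#include <stddef.h>

static uint32_t get_unaligned_be32_sketch(const void *p)
{
	const uint8_t *b = p;

	return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
	       ((uint32_t)b[2] << 8) | (uint32_t)b[3];
}

/* Fetch the 32-bit big-endian word at offset k, the way load_w does;
 * returns -1 when the read would run past the packet, mirroring the
 * load_pointer() NULL case above. */
static int load_word(const uint8_t *pkt, size_t len, size_t k, uint32_t *val)
{
	if (len < 4 || k > len - 4)
		return -1;
	*val = get_unaligned_be32_sketch(pkt + k);
	return 0;
}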
 		return -EFAULT;
 	for (iter = 0; iter < enumcat_len; iter += 2) {
-		cat = ntohs(get_unaligned((__be16 *)&enumcat[iter]));
+		cat = get_unaligned_be16(&enumcat[iter]);
 		if (cat <= cat_prev)
 			return -EFAULT;
 		cat_prev = cat;
 	for (iter = 0; iter < net_cat_len; iter += 2) {
 		ret_val = netlbl_secattr_catmap_setbit(secattr->attr.mls.cat,
-				ntohs(get_unaligned((__be16 *)&net_cat[iter])),
+				get_unaligned_be16(&net_cat[iter]),
 				GFP_ATOMIC);
 		if (ret_val != 0)
 			return ret_val;
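Both CIPSO category hunks walk a raw option buffer two bytes at a time; the 16-bit categories begin wherever the option header puts them, hence the unaligned reads. A sketch of the enumerated-category validity walk under those assumptions (enumcat_valid is a made-up name, and it returns -1 where the kernel returns -EFAULT):

#include <stdint.h>
#include <stddef.h>

static uint16_t get_be16(const void *p)
{
	const uint8_t *b = p;

	return (uint16_t)((b[0] << 8) | b[1]);
}

/* Categories must arrive as a whole number of 16-bit values in
 * strictly increasing order; anything else is a malformed option. */
static int enumcat_valid(const uint8_t *enumcat, size_t enumcat_len)
{
	int cat_prev = -1;	/* below any valid 16-bit category */
	size_t iter;

	if (enumcat_len & 0x01)
		return -1;
	for (iter = 0; iter < enumcat_len; iter += 2) {
		uint16_t cat = get_be16(&enumcat[iter]);

		if ((int)cat <= cat_prev)
			return -1;	/* duplicate or out of order */
		cat_prev = cat;
	}
	return 0;
}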
 		return -EFAULT;
 	for (iter = 0; iter < rngcat_len; iter += 4) {
-		cat_high = ntohs(get_unaligned((__be16 *)&rngcat[iter]));
+		cat_high = get_unaligned_be16(&rngcat[iter]);
 		if ((iter + 4) <= rngcat_len)
-			cat_low = ntohs(
-				get_unaligned((__be16 *)&rngcat[iter + 2]));
+			cat_low = get_unaligned_be16(&rngcat[iter + 2]);
 		else
 			cat_low = 0;
 	u16 cat_high;
 	for (net_iter = 0; net_iter < net_cat_len; net_iter += 4) {
-		cat_high = ntohs(get_unaligned((__be16 *)&net_cat[net_iter]));
+		cat_high = get_unaligned_be16(&net_cat[net_iter]);
 		if ((net_iter + 4) <= net_cat_len)
-			cat_low = ntohs(
-				get_unaligned((__be16 *)&net_cat[net_iter + 2]));
+			cat_low = get_unaligned_be16(&net_cat[net_iter + 2]);
 		else
 			cat_low = 0;
 	}
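The ranged variant reads (high, low) pairs on a 4-byte stride, and a final pair may omit its low bound, which then defaults to category 0; that is what the (iter + 4) <= len test is guarding. A minimal sketch of that parse (rngcat_walk is hypothetical, and the inverted-range check is an illustrative sanity test, not a claim about the kernel's exact validation):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static uint16_t get_be16(const void *p)
{
	const uint8_t *b = p;

	return (uint16_t)((b[0] << 8) | b[1]);
}

/* Print each (low, high) category range; a trailing pair with no
 * low bound covers everything from category 0 up to its high bound. */
static int rngcat_walk(const uint8_t *rngcat, size_t rngcat_len)
{
	size_t iter;

	for (iter = 0; iter + 2 <= rngcat_len; iter += 4) {
		uint16_t cat_high = get_be16(&rngcat[iter]);
		uint16_t cat_low = 0;

		if (iter + 4 <= rngcat_len)
			cat_low = get_be16(&rngcat[iter + 2]);
		if (cat_low > cat_high)
			return -1;	/* inverted range: malformed */
		printf("categories %u-%u\n", cat_low, cat_high);
	}
	return 0;
}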
 	rcu_read_lock();
-	doi_def = cipso_v4_doi_search(ntohl(get_unaligned((__be32 *)&opt[2])));
+	doi_def = cipso_v4_doi_search(get_unaligned_be32(&opt[2]));
 	if (doi_def == NULL) {
 		err_offset = 2;
 		goto validate_return_locked;
 	if (cipso_v4_cache_check(cipso, cipso[1], secattr) == 0)
 		return 0;
-	doi = ntohl(get_unaligned((__be32 *)&cipso[2]));
+	doi = get_unaligned_be32(&cipso[2]);
 	rcu_read_lock();
 	doi_def = cipso_v4_doi_search(doi);
 	if (doi_def == NULL)
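In both cipso_v4 call sites the 32-bit DOI sits at byte offset 2 of the option, after the one-byte type and length fields, so a plain 32-bit load would be misaligned no matter how the surrounding IP header is aligned. A sketch of that extraction step (cipso_doi_of is an illustrative name):

#include <stdint.h>

static uint32_t get_be32(const void *p)
{
	const uint8_t *b = p;

	return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
	       ((uint32_t)b[2] << 8) | (uint32_t)b[3];
}

/* opt[0] is the option type, opt[1] the option length, and the DOI
 * occupies opt[2]..opt[5], just as &opt[2] / &cipso[2] show above. */
static uint32_t cipso_doi_of(const uint8_t *opt)
{
	return get_be32(&opt[2]);
}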
 			   struct tcp_sack_block_wire *sp, int num_sacks,
 			   u32 prior_snd_una)
 {
-	u32 start_seq_0 = ntohl(get_unaligned(&sp[0].start_seq));
-	u32 end_seq_0 = ntohl(get_unaligned(&sp[0].end_seq));
+	u32 start_seq_0 = get_unaligned_be32(&sp[0].start_seq);
+	u32 end_seq_0 = get_unaligned_be32(&sp[0].end_seq);
 	int dup_sack = 0;
 	if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) {
 		tcp_dsack_seen(tp);
 		NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV);
 	} else if (num_sacks > 1) {
-		u32 end_seq_1 = ntohl(get_unaligned(&sp[1].end_seq));
-		u32 start_seq_1 = ntohl(get_unaligned(&sp[1].start_seq));
+		u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq);
+		u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq);
 		if (!after(end_seq_0, end_seq_1) &&
 		    !before(start_seq_0, start_seq_1)) {
 	for (i = 0; i < num_sacks; i++) {
 		int dup_sack = !i && found_dup_sack;
-		sp[used_sacks].start_seq = ntohl(get_unaligned(&sp_wire[i].start_seq));
-		sp[used_sacks].end_seq = ntohl(get_unaligned(&sp_wire[i].end_seq));
+		sp[used_sacks].start_seq = get_unaligned_be32(&sp_wire[i].start_seq);
+		sp[used_sacks].end_seq = get_unaligned_be32(&sp_wire[i].end_seq);
 		if (!tcp_is_sackblock_valid(tp, dup_sack,
 					    sp[used_sacks].start_seq,
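struct tcp_sack_block_wire carries two __be32 sequence numbers, and because TCP options give it no alignment guarantee, every field read goes through the helper before the before()/after() sequence arithmetic runs on host-order values. A self-contained sketch of that conversion step (the type and function names here are illustrative, not the kernel's):

#include <stdint.h>

struct sack_block_wire {	/* 8 bytes, as laid out on the wire */
	uint8_t start_seq[4];
	uint8_t end_seq[4];
};

static uint32_t get_be32(const void *p)
{
	const uint8_t *b = p;

	return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
	       ((uint32_t)b[2] << 8) | (uint32_t)b[3];
}

/* Convert one wire-format SACK block to host-order sequence numbers,
 * the step the loop above performs for each of the num_sacks blocks. */
static void sack_block_to_host(const struct sack_block_wire *sp,
			       uint32_t *start_seq, uint32_t *end_seq)
{
	*start_seq = get_be32(sp->start_seq);
	*end_seq = get_be32(sp->end_seq);
}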
 			switch (opcode) {
 			case TCPOPT_MSS:
 				if (opsize == TCPOLEN_MSS && th->syn && !estab) {
-					u16 in_mss = ntohs(get_unaligned((__be16 *)ptr));
+					u16 in_mss = get_unaligned_be16(ptr);
 					if (in_mss) {
 						if (opt_rx->user_mss &&
 						    opt_rx->user_mss < in_mss)
 				    ((estab && opt_rx->tstamp_ok) ||
 				     (!estab && sysctl_tcp_timestamps))) {
 					opt_rx->saw_tstamp = 1;
-					opt_rx->rcv_tsval = ntohl(get_unaligned((__be32 *)ptr));
-					opt_rx->rcv_tsecr = ntohl(get_unaligned((__be32 *)(ptr+4)));
+					opt_rx->rcv_tsval = get_unaligned_be32(ptr);
+					opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4);
 				}
 				break;
 			case TCPOPT_SACK_PERM:
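Both option cases sit inside TCP's kind/length/value walk: the parser advances byte-by-byte, so a multi-byte value such as the MSS or the two 32-bit timestamp words can land at any offset within the header. A hedged standalone sketch of that walk, reduced to the MSS case (parse_mss is a made-up name, not the kernel's tcp_parse_options):

#include <stdint.h>
#include <stddef.h>

#define TCPOPT_EOL	0
#define TCPOPT_NOP	1
#define TCPOPT_MSS	2
#define TCPOLEN_MSS	4

static uint16_t get_be16(const void *p)
{
	const uint8_t *b = p;

	return (uint16_t)((b[0] << 8) | b[1]);
}

/* Walk the option bytes; return the advertised MSS or 0 if absent. */
static uint16_t parse_mss(const uint8_t *ptr, size_t length)
{
	while (length > 0) {
		uint8_t opcode = *ptr++;
		uint8_t opsize;

		if (opcode == TCPOPT_EOL)
			break;
		if (opcode == TCPOPT_NOP) {	/* one-byte padding */
			length--;
			continue;
		}
		if (length < 2)
			break;
		opsize = *ptr++;	/* opsize counts the kind and length bytes too */
		if (opsize < 2 || opsize > length)
			break;		/* never parse a partial option */
		if (opcode == TCPOPT_MSS && opsize == TCPOLEN_MSS)
			return get_be16(ptr);	/* value bytes follow the header */
		ptr += opsize - 2;
		length -= opsize;
	}
	return 0;
}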