 arch/ppc64/kernel/time.c        |  2
 arch/ppc64/mm/init.c            |  3
 drivers/char/drm/drm_vm.c       |  3
 drivers/char/drm/mga_drv.h      |  2
 drivers/char/drm/mga_state.c    |  2
 drivers/message/fusion/mptsas.c | 12
 include/linux/hugetlb.h         | 16
 mm/hugetlb.c                    | 22
 mm/memory.c                     | 14
 net/dccp/ipv4.c                 |  2
 net/dccp/output.c               | 10
 net/dccp/proto.c                |  2
 net/ipv4/tcp_output.c           | 12
 13 files changed, 49 insertions(+), 53 deletions(-)
diff --git a/arch/ppc64/kernel/time.c b/arch/ppc64/kernel/time.c
index 9939c206afa4..b56c6a324e17 100644
--- a/arch/ppc64/kernel/time.c
+++ b/arch/ppc64/kernel/time.c
@@ -870,7 +870,7 @@ void div128_by_32( unsigned long dividend_high, unsigned long dividend_low,
rb = ((ra + b) - (x * divisor)) << 32;
y = (rb + c)/divisor;
- rc = ((rb + b) - (y * divisor)) << 32;
+ rc = ((rb + c) - (y * divisor)) << 32;
z = (rc + d)/divisor;
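For context, here is a standalone sketch of the long-division scheme div128_by_32 implements. The limb and variable names follow the kernel source shown above; the standalone framing, types, and struct definition are assumptions added for illustration, not the kernel's exact declarations. Each step divides the previous remainder (shifted up 32 bits) plus the next limb, and the remainder carried into the following step must come from that same sum, which is exactly what the one-character fix above restores.

	#include <stdint.h>

	struct div_result {
		uint64_t result_high;
		uint64_t result_low;
	};

	/*
	 * 128-by-32 long division over four 32-bit limbs (a, b, c, d,
	 * most significant first). Every quotient digit stays below
	 * 2^32 because each carried remainder is less than the divisor.
	 */
	static void div128_by_32_sketch(uint64_t dividend_high,
					uint64_t dividend_low,
					uint32_t divisor,
					struct div_result *dr)
	{
		uint64_t a = dividend_high >> 32;
		uint64_t b = dividend_high & 0xffffffff;
		uint64_t c = dividend_low >> 32;
		uint64_t d = dividend_low & 0xffffffff;
		uint64_t w, x, y, z, ra, rb, rc;

		w  = a / divisor;
		ra = (a - w * divisor) << 32;

		x  = (ra + b) / divisor;
		rb = ((ra + b) - x * divisor) << 32;

		y  = (rb + c) / divisor;
		rc = ((rb + c) - y * divisor) << 32; /* remainder of (rb + c), not (rb + b) */

		z  = (rc + d) / divisor;

		dr->result_high = (w << 32) + x;
		dr->result_low  = (y << 32) + z;
	}

With the original `rb + b`, the remainder carried into the final step no longer corresponded to the quotient digit y just computed, so the low 64 bits of the result could be wrong whenever that remainder differed from the correct one.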
diff --git a/arch/ppc64/mm/init.c b/arch/ppc64/mm/init.c
index c2157c9c3acb..be64b157afce 100644
--- a/arch/ppc64/mm/init.c
+++ b/arch/ppc64/mm/init.c
@@ -799,8 +799,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long ea,
if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
local = 1;
- __hash_page(ea, pte_val(pte) & (_PAGE_USER|_PAGE_RW), vsid, ptep,
- 0x300, local);
+ __hash_page(ea, 0, vsid, ptep, 0x300, local);
local_irq_restore(flags);
}
diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c
index ced4215e2275..39ea96e42c5b 100644
--- a/drivers/char/drm/drm_vm.c
+++ b/drivers/char/drm/drm_vm.c
@@ -148,7 +148,8 @@ static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
offset = address - vma->vm_start;
i = (unsigned long)map->handle + offset;
- page = vmalloc_to_page((void *)i);
+ page = (map->type == _DRM_CONSISTENT) ?
+ virt_to_page((void *)i) : vmalloc_to_page((void *)i);
if (!page)
return NOPAGE_OOM;
get_page(page);
diff --git a/drivers/char/drm/mga_drv.h b/drivers/char/drm/mga_drv.h
index b22fdbd4f830..6059c5a5b105 100644
--- a/drivers/char/drm/mga_drv.h
+++ b/drivers/char/drm/mga_drv.h
@@ -227,7 +227,7 @@ static inline u32 _MGA_READ(u32 *addr)
#define MGA_EMIT_STATE( dev_priv, dirty ) \
do { \
if ( (dirty) & ~MGA_UPLOAD_CLIPRECTS ) { \
- if ( dev_priv->chipset == MGA_CARD_TYPE_G400 ) { \
+ if ( dev_priv->chipset >= MGA_CARD_TYPE_G400 ) { \
mga_g400_emit_state( dev_priv ); \
} else { \
mga_g200_emit_state( dev_priv ); \
diff --git a/drivers/char/drm/mga_state.c b/drivers/char/drm/mga_state.c
index 05bbb4719376..6ac5e006226f 100644
--- a/drivers/char/drm/mga_state.c
+++ b/drivers/char/drm/mga_state.c
@@ -53,7 +53,7 @@ static void mga_emit_clip_rect( drm_mga_private_t *dev_priv,
/* Force reset of DWGCTL on G400 (eliminates clip disable bit).
*/
- if (dev_priv->chipset == MGA_CARD_TYPE_G400) {
+ if (dev_priv->chipset >= MGA_CARD_TYPE_G400) {
DMA_BLOCK(MGA_DWGCTL, ctx->dwgctl,
MGA_LEN + MGA_EXEC, 0x80000000,
MGA_DWGCTL, ctx->dwgctl,
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 429820e48c69..7de19a84dc74 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -257,8 +257,8 @@ static void mptsas_print_device_pg0(SasDevicePage0_t *pg0)
printk("SAS Address=0x%llX\n", le64_to_cpu(sas_address));
printk("Target ID=0x%X\n", pg0->TargetID);
printk("Bus=0x%X\n", pg0->Bus);
- printk("PhyNum=0x%X\n", pg0->PhyNum);
- printk("AccessStatus=0x%X\n", le16_to_cpu(pg0->AccessStatus));
+ printk("Parent Phy Num=0x%X\n", pg0->PhyNum);
+ printk("Access Status=0x%X\n", le16_to_cpu(pg0->AccessStatus));
printk("Device Info=0x%X\n", le32_to_cpu(pg0->DeviceInfo));
printk("Flags=0x%X\n", le16_to_cpu(pg0->Flags));
printk("Physical Port=0x%X\n", pg0->PhysicalPort);
@@ -270,7 +270,7 @@ static void mptsas_print_expander_pg1(SasExpanderPage1_t *pg1)
printk("---- SAS EXPANDER PAGE 1 ------------\n");
printk("Physical Port=0x%X\n", pg1->PhysicalPort);
- printk("PHY Identifier=0x%X\n", pg1->Phy);
+ printk("PHY Identifier=0x%X\n", pg1->PhyIdentifier);
printk("Negotiated Link Rate=0x%X\n", pg1->NegotiatedLinkRate);
printk("Programmed Link Rate=0x%X\n", pg1->ProgrammedLinkRate);
printk("Hardware Link Rate=0x%X\n", pg1->HwLinkRate);
@@ -604,7 +604,7 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
mptsas_print_expander_pg1(buffer);
/* save config data */
- phy_info->phy_id = buffer->Phy;
+ phy_info->phy_id = buffer->PhyIdentifier;
phy_info->port_id = buffer->PhysicalPort;
phy_info->negotiated_link_rate = buffer->NegotiatedLinkRate;
phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
@@ -825,6 +825,8 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc, int *index)
mptsas_sas_device_pg0(ioc, &port_info->phy_info[i].identify,
(MPI_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE <<
MPI_SAS_DEVICE_PGAD_FORM_SHIFT), handle);
+ port_info->phy_info[i].identify.phy_id =
+ port_info->phy_info[i].phy_id;
handle = port_info->phy_info[i].identify.handle;
if (port_info->phy_info[i].attached.handle) {
@@ -881,6 +883,8 @@ mptsas_probe_expander_phys(MPT_ADAPTER *ioc, u32 *handle, int *index)
(MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
port_info->phy_info[i].identify.handle);
+ port_info->phy_info[i].identify.phy_id =
+ port_info->phy_info[i].phy_id;
}
if (port_info->phy_info[i].attached.handle) {
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 42cb7d70f9ac..d664330d900e 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -25,6 +25,8 @@ int is_hugepage_mem_enough(size_t);
unsigned long hugetlb_total_pages(void);
struct page *alloc_huge_page(void);
void free_huge_page(struct page *);
+int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long address, int write_access);
extern unsigned long max_huge_pages;
extern const unsigned long hugetlb_zero, hugetlb_infinity;
@@ -99,6 +101,7 @@ static inline unsigned long hugetlb_total_pages(void)
do { } while (0)
#define alloc_huge_page() ({ NULL; })
#define free_huge_page(p) ({ (void)(p); BUG(); })
+#define hugetlb_fault(mm, vma, addr, write) ({ BUG(); 0; })
#ifndef HPAGE_MASK
#define HPAGE_MASK 0 /* Keep the compiler happy */
@@ -155,24 +158,11 @@ static inline void set_file_hugepages(struct file *file)
{
file->f_op = &hugetlbfs_file_operations;
}
-
-static inline int valid_hugetlb_file_off(struct vm_area_struct *vma,
- unsigned long address)
-{
- struct inode *inode = vma->vm_file->f_dentry->d_inode;
- loff_t file_off = address - vma->vm_start;
-
- file_off += (vma->vm_pgoff << PAGE_SHIFT);
-
- return (file_off < inode->i_size);
-}
-
#else /* !CONFIG_HUGETLBFS */
#define is_file_hugepages(file) 0
#define set_file_hugepages(file) BUG()
#define hugetlb_zero_setup(size) ERR_PTR(-ENOSYS)
-#define valid_hugetlb_file_off(vma, address) 0
#endif /* !CONFIG_HUGETLBFS */
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a1b30d45459e..61d380678030 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -394,6 +394,28 @@ out:
return ret;
}
+/*
+ * On ia64 at least, it is possible to receive a hugetlb fault from a
+ * stale zero entry left in the TLB from earlier hardware prefetching.
+ * Low-level arch code should already have flushed the stale entry as
+ * part of its fault handling, but we do need to accept this minor fault
+ * and return successfully. Whereas the "normal" case is that this is
+ * an access to a hugetlb page which has been truncated off since mmap.
+ */
+int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long address, int write_access)
+{
+ int ret = VM_FAULT_SIGBUS;
+ pte_t *pte;
+
+ spin_lock(&mm->page_table_lock);
+ pte = huge_pte_offset(mm, address);
+ if (pte && !pte_none(*pte))
+ ret = VM_FAULT_MINOR;
+ spin_unlock(&mm->page_table_lock);
+ return ret;
+}
+
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
struct page **pages, struct vm_area_struct **vmas,
unsigned long *position, int *length, int i)
diff --git a/mm/memory.c b/mm/memory.c
index 8c88b973abc5..1db40e935e55 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2045,18 +2045,8 @@ int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma,
inc_page_state(pgfault);
- if (unlikely(is_vm_hugetlb_page(vma))) {
- if (valid_hugetlb_file_off(vma, address))
- /* We get here only if there was a stale(zero) TLB entry
- * (because of HW prefetching).
- * Low-level arch code (if needed) should have already
- * purged the stale entry as part of this fault handling.
- * Here we just return.
- */
- return VM_FAULT_MINOR;
- else
- return VM_FAULT_SIGBUS; /* mapping truncation does this. */
- }
+ if (unlikely(is_vm_hugetlb_page(vma)))
+ return hugetlb_fault(mm, vma, address, write_access);
/*
* We need the page table lock to synchronize with kswapd
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index ae088d1347af..6298cf58ff9e 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -463,6 +463,7 @@ static int dccp_v4_send_response(struct sock *sk, struct request_sock *req,
if (skb != NULL) {
const struct inet_request_sock *ireq = inet_rsk(req);
+ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
ireq->rmt_addr,
ireq->opt);
@@ -647,6 +648,7 @@ int dccp_v4_send_reset(struct sock *sk, enum dccp_reset_codes code)
if (skb != NULL) {
const struct inet_sock *inet = inet_sk(sk);
+ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
err = ip_build_and_send_pkt(skb, sk,
inet->saddr, inet->daddr, NULL);
if (err == NET_XMIT_CN)
diff --git a/net/dccp/output.c b/net/dccp/output.c
index 4786bdcddcc9..29250749f16f 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -62,10 +62,8 @@ int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
skb->h.raw = skb_push(skb, dccp_header_size);
dh = dccp_hdr(skb);
- /*
- * Data packets are not cloned as they are never retransmitted
- */
- if (skb_cloned(skb))
+
+ if (!skb->sk)
skb_set_owner_w(skb, sk);
/* Build DCCP header and checksum it. */
@@ -102,6 +100,7 @@ int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
+ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
err = ip_queue_xmit(skb, 0);
if (err <= 0)
return err;
@@ -243,7 +242,8 @@ int dccp_write_xmit(struct sock *sk, struct sk_buff *skb, long *timeo)
err = dccp_transmit_skb(sk, skb);
ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len);
- }
+ } else
+ kfree_skb(skb);
return err;
}
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index a1cfd0e9e3bc..a021c3422f67 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -402,8 +402,6 @@ int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
* This bug was _quickly_ found & fixed by just looking at an OSTRA
* generated callgraph 8) -acme
*/
- if (rc != 0)
- goto out_discard;
out_release:
release_sock(sk);
return rc ? : len;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 7114031fdc70..b907456a79f4 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -435,17 +435,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss
int nsize, old_factor;
u16 flags;
- if (unlikely(len >= skb->len)) {
- if (net_ratelimit()) {
- printk(KERN_DEBUG "TCP: seg_size=%u, mss=%u, seq=%u, "
- "end_seq=%u, skb->len=%u.\n", len, mss_now,
- TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
- skb->len);
- WARN_ON(1);
- }
- return 0;
- }
-
+ BUG_ON(len > skb->len);
nsize = skb_headlen(skb) - len;
if (nsize < 0)
nsize = 0;