#!/bin/sh -x
# this is part 193 of a 197-part archive
# do not concatenate these parts, unpack them in order with /bin/sh
# file patch-2.4.10 continued
if test ! -r _shar_seq_.tmp; then
echo 'Please unpack part 1 first!'
exit 1
fi
(read Scheck
if test "$Scheck" != 193; then
echo "Please unpack part $Scheck next!"
exit 1
else
exit 0
fi
) < _shar_seq_.tmp || exit 1
if test ! -f _shar_wnt_.tmp; then
echo 'x - still skipping patch-2.4.10'
else
echo 'x - continuing with patch-2.4.10'
sed 's/^X//' << 'SHAR_EOF' >> 'patch-2.4.10' &&
X p->lowest_bit = offset;
X if (offset > p->highest_bit)
@@ -202,21 +191,16 @@
X return;
X
X bad_nofile:
- printk("swap_free: Trying to free nonexistent swap-page\n");
+ printk(KERN_ERR "swap_free: %s%08lx\n", Bad_file, entry.val);
X goto out;
X bad_device:
- printk("swap_free: Trying to free swap from unused swap-device\n");
+ printk(KERN_ERR "swap_free: %s%08lx\n", Unused_file, entry.val);
X goto out;
X bad_offset:
- printk("swap_free: offset exceeds max\n");
+ printk(KERN_ERR "swap_free: %s%08lx\n", Bad_offset, entry.val);
X goto out;
X bad_free:
- printk("VM: Bad swap entry %08lx\n", entry.val);
- goto out;
-bad_count:
- swap_device_unlock(p);
- swap_list_unlock();
- printk(KERN_ERR "VM: Bad count %hd current count %hd\n", count, p->swap_map[offset]);
+ printk(KERN_ERR "swap_free: %s%08lx\n", Unused_offset, entry.val);
X goto out;
X }
X
@@ -229,33 +213,23 @@
X * share this swap entry, so be cautious and let do_wp_page work out
X * what to do if a write is requested later.
X */
-/* tasklist_lock and vma->vm_mm->page_table_lock are held */
+/* BKL, mmlist_lock and vma->vm_mm->page_table_lock are held */
X static inline void unuse_pte(struct vm_area_struct * vma, unsigned long address,
X pte_t *dir, swp_entry_t entry, struct page* page)
X {
X pte_t pte = *dir;
X
- if (pte_none(pte))
- return;
- if (pte_present(pte)) {
- /* If this entry is swap-cached, then page must already
- hold the right address for any copies in physical
- memory */
- if (pte_page(pte) != page)
- return;
- /* We will be removing the swap cache in a moment, so... */
- ptep_mkdirty(dir);
+ if (likely(pte_to_swp_entry(pte).val != entry.val))
X return;
- }
- if (pte_to_swp_entry(pte).val != entry.val)
+ if (unlikely(pte_none(pte) || pte_present(pte)))
X return;
- set_pte(dir, pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
- swap_free(entry);
X get_page(page);
+ set_pte(dir, pte_mkold(mk_pte(page, vma->vm_page_prot)));
+ swap_free(entry);
X ++vma->vm_mm->rss;
X }
X
-/* tasklist_lock and vma->vm_mm->page_table_lock are held */
+/* BKL, mmlist_lock and vma->vm_mm->page_table_lock are held */
X static inline void unuse_pmd(struct vm_area_struct * vma, pmd_t *dir,
X unsigned long address, unsigned long size, unsigned long offset,
X swp_entry_t entry, struct page* page)
@@ -283,7 +257,7 @@
X } while (address && (address < end));
X }
X
-/* tasklist_lock and vma->vm_mm->page_table_lock are held */
+/* BKL, mmlist_lock and vma->vm_mm->page_table_lock are held */
X static inline void unuse_pgd(struct vm_area_struct * vma, pgd_t *dir,
X unsigned long address, unsigned long size,
X swp_entry_t entry, struct page* page)
@@ -314,7 +288,7 @@
X } while (address && (address < end));
X }
X
-/* tasklist_lock and vma->vm_mm->page_table_lock are held */
+/* BKL, mmlist_lock and vma->vm_mm->page_table_lock are held */
X static void unuse_vma(struct vm_area_struct * vma, pgd_t *pgdir,
X swp_entry_t entry, struct page* page)
X {
@@ -337,8 +311,6 @@
X /*
X * Go through process' page directory.
X */
- if (!mm)
- return;
X spin_lock(&mm->page_table_lock);
X for (vma = mm->mmap; vma; vma = vma->vm_next) {
X pgd_t * pgd = pgd_offset(mm, vma->vm_start);
@@ -349,53 +321,42 @@
X }
X
X /*
- * this is called when we find a page in the swap list
- * all the locks have been dropped at this point which
- * isn't a problem because we rescan the swap map
- * and we _don't_ clear the refrence count if for
- * some reason it isn't 0
+ * Scan swap_map from current position to next entry still in use.
+ * Recycle to start on reaching the end, returning 0 when empty.
X */
-
-static inline int free_found_swap_entry(unsigned int type, int i)
+static int find_next_to_unuse(struct swap_info_struct *si, int prev)
X {
- struct task_struct *p;
- struct page *page;
- swp_entry_t entry;
-
- entry = SWP_ENTRY(type, i);
+ int max = si->max;
+ int i = prev;
+ int count;
X
- /*
- * Get a page for the entry, using the existing swap
- * cache page if there is one. Otherwise, get a clean
- * page and read the swap into it.
- */
- page = read_swap_cache_async(entry);
- if (!page) {
- swap_free(entry);
- return -ENOMEM;
- }
- lock_page(page);
- if (PageSwapCache(page))
- delete_from_swap_cache_nolock(page);
- UnlockPage(page);
- read_lock(&tasklist_lock);
- for_each_task(p)
- unuse_process(p->mm, entry, page);
- read_unlock(&tasklist_lock);
- shmem_unuse(entry, page);
- /*
- * Now get rid of the extra reference to the temporary
- * page we've been using.
- */
- page_cache_release(page);
X /*
- * Check for and clear any overflowed swap map counts.
+ * No need for swap_device_lock(si) here: we're just looking
+ * for whether an entry is in use, not modifying it; false
+ * hits are okay, and sys_swapoff() has already prevented new
+ * allocations from this area (while holding swap_list_lock()).
X */
- swap_free(entry);
- return 0;
+ for (;;) {
+ if (++i >= max) {
+ if (!prev) {
+ i = 0;
+ break;
+ }
+ /*
+ * No entries in use at top of swap_map,
+ * loop back to start and recheck there.
+ */
+ max = prev + 1;
+ prev = 0;
+ i = 1;
+ }
+ count = si->swap_map[i];
+ if (count && count != SWAP_MAP_BAD)
+ break;
+ }
+ return i;
X }
X
-
X /*
X * We completely avoid races by reading each swap page in advance,
X * and then search for the process using it. All the necessary
@@ -404,85 +365,181 @@
X static int try_to_unuse(unsigned int type)
X {
X struct swap_info_struct * si = &swap_info[type];
- int ret, foundpage;
+ struct mm_struct *start_mm;
+ unsigned short *swap_map;
+ unsigned short swcount;
+ struct page *page;
+ swp_entry_t entry;
+ int i = 0;
+ int retval = 0;
+ int reset_overflow = 0;
X
- do {
- int i;
+ /*
+ * When searching mms for an entry, a good strategy is to
+ * start at the first mm we freed the previous entry from
+ * (though actually we don't notice whether we or coincidence
+ * freed the entry). Initialize this start_mm with a hold.
+ *
+ * A simpler strategy would be to start at the last mm we
+ * freed the previous entry from; but that would take less
+ * advantage of mmlist ordering (now preserved by swap_out()),
+ * which clusters forked address spaces together, most recent
+ * child immediately after parent. If we race with dup_mmap(),
+ * we very much want to resolve parent before child, otherwise
+ * we may miss some entries: using last mm would invert that.
+ */
+ start_mm = &init_mm;
+ atomic_inc(&init_mm.mm_users);
X
- /*
- * The algorithm is inefficient but seldomly used
- *
- * Find a swap page in use and read it in.
+ /*
+ * Keep on scanning until all entries have gone. Usually,
+ * one pass through swap_map is enough, but not necessarily:
+ * mmput() removes mm from mmlist before exit_mmap() and its
+ * zap_page_range(). That's not too bad, those entries are
+ * on their way out, and handled faster there than here.
+ * do_munmap() behaves similarly, taking the range out of mm's
+ * vma list before zap_page_range(). But unfortunately, when
+ * unmapping a part of a vma, it takes the whole out first,
+ * then reinserts what's left after (might even reschedule if
+ * open() method called) - so swap entries may be invisible
+ * to swapoff for a while, then reappear - but that is rare.
+ */
+ while ((i = find_next_to_unuse(si, i))) {
+ /*
+ * Get a page for the entry, using the existing swap
+ * cache page if there is one. Otherwise, get a clean
+ * page and read the swap into it.
X */
- foundpage = 0;
- swap_device_lock(si);
- for (i = 1; i < si->max ; i++) {
- int count = si->swap_map[i];
- if (!count || count == SWAP_MAP_BAD)
- continue;
-
+ swap_map = &si->swap_map[i];
+ entry = SWP_ENTRY(type, i);
+ page = read_swap_cache_async(entry);
+ if (!page) {
X /*
- * Prevent swaphandle from being completely
- * unused by swap_free while we are trying
- * to read in the page - this prevents warning
- * messages from rw_swap_page_base.
+ * Either swap_duplicate() failed because entry
+ * has been freed independently, and will not be
+ * reused since sys_swapoff() already disabled
+ * allocation from here, or alloc_page() failed.
X */
- foundpage = 1;
- if (count != SWAP_MAP_MAX)
- si->swap_map[i] = count + 1;
+ if (!*swap_map)
+ continue;
+ retval = -ENOMEM;
+ break;
+ }
X
- swap_device_unlock(si);
- ret = free_found_swap_entry(type,i);
- if (ret)
- return ret;
+ /*
+ * Don't hold on to start_mm if it looks like exiting.
+ * Can mmput ever block? if so, then we cannot risk
+ * it between deleting the page from the swap cache,
+ * and completing the search through mms (and cannot
+ * use it to avoid the long hold on mmlist_lock there).
+ */
+ if (atomic_read(&start_mm->mm_users) == 1) {
+ mmput(start_mm);
+ start_mm = &init_mm;
+ atomic_inc(&init_mm.mm_users);
+ }
X
- /*
- * we pick up the swap_list_lock() to guard the nr_swap_pages,
- * si->swap_map[] should only be changed if it is SWAP_MAP_MAX
- * otherwise ugly stuff can happen with other people who are in
- * the middle of a swap operation to this device. This kind of
- * operation can sometimes be detected with the undead swap
- * check. Don't worry about these 'undead' entries for now
- * they will be caught the next time though the top loop.
- * Do worry, about the weak locking that allows this to happen
- * because if it happens to a page that is SWAP_MAP_MAX
- * then bad stuff can happen.
- */
- swap_list_lock();
- swap_device_lock(si);
- if (si->swap_map[i] > 0) {
- /* normally this would just kill the swap page if
- * it still existed, it appears though that the locks
- * are a little fuzzy
- */
- if (si->swap_map[i] != SWAP_MAP_MAX) {
- printk("VM: Undead swap entry %08lx\n",
- SWP_ENTRY(type, i).val);
- } else {
- nr_swap_pages++;
- si->swap_map[i] = 0;
+ /*
+ * Wait for and lock page. Remove it from swap cache
+ * so try_to_swap_out won't bump swap count. Mark dirty
+ * so try_to_swap_out will preserve it without us having
+ * to mark any present ptes as dirty: so we can skip
+ * searching processes once swap count has all gone.
+ */
+ lock_page(page);
+ if (PageSwapCache(page))
+ delete_from_swap_cache(page);
+ SetPageDirty(page);
+ UnlockPage(page);
+ flush_page_to_ram(page);
+
+ /*
+ * Remove all references to entry, without blocking.
+ * Whenever we reach init_mm, there's no address space
+ * to search, but use it as a reminder to search shmem.
+ */
+ swcount = *swap_map;
+ if (swcount) {
+ if (start_mm == &init_mm)
+ shmem_unuse(entry, page);
+ else
+ unuse_process(start_mm, entry, page);
+ }
+ if (*swap_map) {
+ int set_start_mm = (*swap_map >= swcount);
+ struct list_head *p = &start_mm->mmlist;
+ struct mm_struct *new_start_mm = start_mm;
+ struct mm_struct *mm;
+
+ spin_lock(&mmlist_lock);
+ while (*swap_map && (p = p->next) != &start_mm->mmlist) {
+ mm = list_entry(p, struct mm_struct, mmlist);
+ swcount = *swap_map;
+ if (mm == &init_mm) {
+ set_start_mm = 1;
+ shmem_unuse(entry, page);
+ } else
+ unuse_process(mm, entry, page);
+ if (set_start_mm && *swap_map < swcount) {
+ new_start_mm = mm;
+ set_start_mm = 0;
X }
X }
+ atomic_inc(&new_start_mm->mm_users);
+ spin_unlock(&mmlist_lock);
+ mmput(start_mm);
+ start_mm = new_start_mm;
+ }
+ page_cache_release(page);
+
+ /*
+ * How could swap count reach 0x7fff when the maximum
+ * pid is 0x7fff, and there's no way to repeat a swap
+ * page within an mm (except in shmem, where it's the
+ * shared object which takes the reference count)?
+ * We believe SWAP_MAP_MAX cannot occur in Linux 2.4.
+ *
+ * If that's wrong, then we should worry more about
+ * exit_mmap() and do_munmap() cases described above:
+ * we might be resetting SWAP_MAP_MAX too early here.
+ * We know "Undead"s can happen, they're okay, so don't
+ * report them; but do report if we reset SWAP_MAP_MAX.
+ */
+ if (*swap_map == SWAP_MAP_MAX) {
+ swap_list_lock();
+ swap_device_lock(si);
+ nr_swap_pages++;
+ *swap_map = 0;
X swap_device_unlock(si);
X swap_list_unlock();
+ reset_overflow = 1;
+ }
X
- /*
- * This lock stuff is ulgy!
- * Make sure that we aren't completely killing
- * interactive performance.
- */
- if (current->need_resched)
- schedule();
- swap_device_lock(si);
+ /*
+ * Make sure that we aren't completely killing
+ * interactive performance. Interruptible check on
+ * signal_pending() would be nice, but changes the spec?
+ */
+ if (current->need_resched)
+ schedule();
+ else {
+ unlock_kernel();
+ lock_kernel();
X }
- swap_device_unlock(si);
- } while (foundpage);
- return 0;
+ }
+
+ mmput(start_mm);
+ if (reset_overflow) {
+ printk(KERN_WARNING "swapoff: cleared swap entry overflow\n");
+ swap_overflow = 0;
+ }
+ return retval;
X }
X
X asmlinkage long sys_swapoff(const char * specialfile)
X {
X struct swap_info_struct * p = NULL;
+ unsigned short *swap_map;
X struct nameidata nd;
X int i, type, prev;
X int err;
@@ -500,14 +557,8 @@
X for (type = swap_list.head; type >= 0; type = swap_info[type].next) {
X p = swap_info + type;
X if ((p->flags & SWP_WRITEOK) == SWP_WRITEOK) {
- if (p->swap_file) {
- if (p->swap_file == nd.dentry)
- break;
- } else {
- if (S_ISBLK(nd.dentry->d_inode->i_mode)
- && (p->swap_device == nd.dentry->d_inode->i_rdev))
- break;
- }
+ if (p->swap_file == nd.dentry)
+ break;
X }
X prev = type;
X }
@@ -528,8 +579,8 @@
X }
X nr_swap_pages -= p->pages;
X total_swap_pages -= p->pages;
- swap_list_unlock();
X p->flags = SWP_USED;
+ swap_list_unlock();
X err = try_to_unuse(type);
X if (err) {
X /* re-insert swap space back into swap_list */
@@ -544,22 +595,28 @@
X swap_info[prev].next = p - swap_info;
X nr_swap_pages += p->pages;
X total_swap_pages += p->pages;
- swap_list_unlock();
X p->flags = SWP_WRITEOK;
+ swap_list_unlock();
X goto out_dput;
X }
X if (p->swap_device)
- blkdev_put(nd.dentry->d_inode->i_bdev, BDEV_SWAP);
+ blkdev_put(p->swap_file->d_inode->i_bdev, BDEV_SWAP);
X path_release(&nd);
X
- nd.dentry = p->swap_file;
- p->swap_file = NULL;
+ swap_list_lock();
+ swap_device_lock(p);
X nd.mnt = p->swap_vfsmnt;
+ nd.dentry = p->swap_file;
X p->swap_vfsmnt = NULL;
+ p->swap_file = NULL;
X p->swap_device = 0;
- vfree(p->swap_map);
+ p->max = 0;
+ swap_map = p->swap_map;
X p->swap_map = NULL;
X p->flags = 0;
+ swap_device_unlock(p);
+ swap_list_unlock();
+ vfree(swap_map);
X err = 0;
X
X out_dput:
@@ -637,20 +694,24 @@
X union swap_header *swap_header = 0;
X int swap_header_version;
X int nr_good_pages = 0;
- unsigned long maxpages;
+ unsigned long maxpages = 1;
X int swapfilesize;
X struct block_device *bdev = NULL;
+ unsigned short *swap_map;
X
X if (!capable(CAP_SYS_ADMIN))
X return -EPERM;
X lock_kernel();
+ swap_list_lock();
X p = swap_info;
X for (type = 0 ; type < nr_swapfiles ; type++,p++)
X if (!(p->flags & SWP_USED))
X break;
X error = -EPERM;
- if (type >= MAX_SWAPFILES)
+ if (type >= MAX_SWAPFILES) {
+ swap_list_unlock();
X goto out;
+ }
X if (type >= nr_swapfiles)
X nr_swapfiles = type+1;
X p->flags = SWP_USED;
@@ -662,7 +723,6 @@
X p->highest_bit = 0;
X p->cluster_nr = 0;
X p->sdev_lock = SPIN_LOCK_UNLOCKED;
- p->max = 1;
X p->next = -1;
X if (swap_flags & SWAP_FLAG_PREFER) {
X p->prio =
@@ -670,6 +730,7 @@
X } else {
X p->prio = --least_priority;
X }
+ swap_list_unlock();
X error = user_path_walk(specialfile, &nd);
X if (error)
X goto bad_swap_2;
@@ -686,6 +747,7 @@
X p->swap_device = dev;
X set_blocksize(dev, PAGE_SIZE);
X
+ bd_acquire(swap_inode);
X bdev = swap_inode->i_bdev;
X bdops = devfs_get_ops(devfs_get_handle_from_inode(swap_inode));
X if (bdops) bdev->bd_op = bdops;
@@ -698,29 +760,24 @@
X if (!dev || (blk_size[MAJOR(dev)] &&
X !blk_size[MAJOR(dev)][MINOR(dev)]))
X goto bad_swap;
- error = -EBUSY;
- for (i = 0 ; i < nr_swapfiles ; i++) {
- if (i == type)
- continue;
- if (dev == swap_info[i].swap_device)
- goto bad_swap;
- }
X swapfilesize = 0;
X if (blk_size[MAJOR(dev)])
X swapfilesize = blk_size[MAJOR(dev)][MINOR(dev)]
X >> (PAGE_SHIFT - 10);
- } else if (S_ISREG(swap_inode->i_mode)) {
- error = -EBUSY;
- for (i = 0 ; i < nr_swapfiles ; i++) {
- if (i == type || !swap_info[i].swap_file)
- continue;
- if (swap_inode == swap_info[i].swap_file->d_inode)
- goto bad_swap;
- }
+ } else if (S_ISREG(swap_inode->i_mode))
X swapfilesize = swap_inode->i_size >> PAGE_SHIFT;
- } else
+ else
X goto bad_swap;
X
+ error = -EBUSY;
+ for (i = 0 ; i < nr_swapfiles ; i++) {
+ struct swap_info_struct *q = &swap_info[i];
+ if (i == type || !q->swap_file)
+ continue;
+ if (swap_inode->i_mapping == q->swap_file->d_inode->i_mapping)
+ goto bad_swap;
+ }
+
X swap_header = (void *) __get_free_page(GFP_USER);
X if (!swap_header) {
X printk("Unable to start swapping: out of memory :-)\n");
@@ -752,17 +809,17 @@
X if (!p->lowest_bit)
X p->lowest_bit = i;
X p->highest_bit = i;
- p->max = i+1;
+ maxpages = i+1;
X j++;
X }
X }
X nr_good_pages = j;
- p->swap_map = vmalloc(p->max * sizeof(short));
+ p->swap_map = vmalloc(maxpages * sizeof(short));
X if (!p->swap_map) {
X error = -ENOMEM;
X goto bad_swap;
X }
- for (i = 1 ; i < p->max ; i++) {
+ for (i = 1 ; i < maxpages ; i++) {
X if (test_bit(i,(char *) swap_header))
X p->swap_map[i] = 0;
X else
@@ -782,25 +839,23 @@
X }
X
X p->lowest_bit = 1;
- p->highest_bit = swap_header->info.last_page - 1;
- p->max = swap_header->info.last_page;
-
- maxpages = SWP_OFFSET(SWP_ENTRY(0,~0UL));
- if (p->max >= maxpages)
- p->max = maxpages-1;
+ maxpages = SWP_OFFSET(SWP_ENTRY(0,~0UL)) - 1;
+ if (maxpages > swap_header->info.last_page)
+ maxpages = swap_header->info.last_page;
+ p->highest_bit = maxpages - 1;
X
X error = -EINVAL;
X if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
X goto bad_swap;
X
X /* OK, set up the swap map and apply the bad block list */
- if (!(p->swap_map = vmalloc (p->max * sizeof(short)))) {
+ if (!(p->swap_map = vmalloc(maxpages * sizeof(short)))) {
X error = -ENOMEM;
X goto bad_swap;
X }
X
X error = 0;
- memset(p->swap_map, 0, p->max * sizeof(short));
+ memset(p->swap_map, 0, maxpages * sizeof(short));
X for (i=0; i<swap_header->info.nr_badpages; i++) {
X int page = swap_header->info.badpages[i];
X if (page <= 0 || page >= swap_header->info.last_page)
@@ -815,7 +870,7 @@
X goto bad_swap;
X }
X
- if (swapfilesize && p->max > swapfilesize) {
+ if (swapfilesize && maxpages > swapfilesize) {
X printk(KERN_WARNING
X "Swap area shorter than signature indicates\n");
X error = -EINVAL;
@@ -827,9 +882,11 @@
X goto bad_swap;
X }
X p->swap_map[0] = SWAP_MAP_BAD;
+ swap_list_lock();
+ swap_device_lock(p);
+ p->max = maxpages;
X p->flags = SWP_WRITEOK;
X p->pages = nr_good_pages;
- swap_list_lock();
X nr_swap_pages += nr_good_pages;
X total_swap_pages += nr_good_pages;
X printk(KERN_INFO "Adding Swap: %dk swap-space (priority %d)\n",
@@ -849,6 +906,7 @@
X } else {
X swap_info[prev].next = p - swap_info;
X }
+ swap_device_unlock(p);
X swap_list_unlock();
X error = 0;
X goto out;
@@ -856,8 +914,8 @@
X if (bdev)
X blkdev_put(bdev, BDEV_SWAP);
X bad_swap_2:
- if (p->swap_map)
- vfree(p->swap_map);
+ swap_list_lock();
+ swap_map = p->swap_map;
X nd.mnt = p->swap_vfsmnt;
X nd.dentry = p->swap_file;
X p->swap_device = 0;
@@ -867,6 +925,9 @@
X p->flags = 0;
X if (!(swap_flags & SWAP_FLAG_PREFER))
X ++least_priority;
+ swap_list_unlock();
+ if (swap_map)
+ vfree(swap_map);
X path_release(&nd);
X out:
X if (swap_header)
@@ -878,32 +939,30 @@
X void si_swapinfo(struct sysinfo *val)
X {
X unsigned int i;
- unsigned long freeswap = 0;
- unsigned long totalswap = 0;
+ unsigned long nr_to_be_unused = 0;
X
+ swap_list_lock();
X for (i = 0; i < nr_swapfiles; i++) {
X unsigned int j;
- if ((swap_info[i].flags & SWP_WRITEOK) != SWP_WRITEOK)
+ if (swap_info[i].flags != SWP_USED)
X continue;
X for (j = 0; j < swap_info[i].max; ++j) {
X switch (swap_info[i].swap_map[j]) {
+ case 0:
X case SWAP_MAP_BAD:
X continue;
- case 0:
- freeswap++;
X default:
- totalswap++;
+ nr_to_be_unused++;
X }
X }
X }
- val->freeswap = freeswap;
- val->totalswap = totalswap;
- return;
+ val->freeswap = nr_swap_pages + nr_to_be_unused;
+ val->totalswap = total_swap_pages + nr_to_be_unused;
+ swap_list_unlock();
X }
X
X /*
X * Verify that a swap entry is valid and increment its swap map count.
- * Kernel_lock is held, which guarantees existance of swap device.
X *
X * Note: if swap_map[] reaches SWAP_MAP_MAX the entries are treated as
X * "permanent", but will be reclaimed by the next swapoff.
@@ -914,43 +973,30 @@
X unsigned long offset, type;
X int result = 0;
X
- /* Swap entry 0 is illegal */
- if (!entry.val)
- goto out;
X type = SWP_TYPE(entry);
X if (type >= nr_swapfiles)
X goto bad_file;
X p = type + swap_info;
X offset = SWP_OFFSET(entry);
- if (offset >= p->max)
- goto bad_offset;
- if (!p->swap_map[offset])
- goto bad_unused;
- /*
- * Entry is valid, so increment the map count.
- */
+
X swap_device_lock(p);
- if (p->swap_map[offset] < SWAP_MAP_MAX)
- p->swap_map[offset]++;
- else {
- static int overflow = 0;
- if (overflow++ < 5)
- printk("VM: swap entry overflow\n");
- p->swap_map[offset] = SWAP_MAP_MAX;
+ if (offset < p->max && p->swap_map[offset]) {
+ if (p->swap_map[offset] < SWAP_MAP_MAX - 1) {
+ p->swap_map[offset]++;
+ result = 1;
+ } else if (p->swap_map[offset] <= SWAP_MAP_MAX) {
+ if (swap_overflow++ < 5)
+ printk(KERN_WARNING "swap_dup: swap entry overflow\n");
+ p->swap_map[offset] = SWAP_MAP_MAX;
+ result = 1;
+ }
X }
X swap_device_unlock(p);
- result = 1;
X out:
X return result;
X
X bad_file:
- printk("Bad swap file entry %08lx\n", entry.val);
- goto out;
-bad_offset:
- printk("Bad swap offset entry %08lx\n", entry.val);
- goto out;
-bad_unused:
- printk("Unused swap offset entry in swap_dup %08lx\n", entry.val);
+ printk(KERN_ERR "swap_dup: %s%08lx\n", Bad_file, entry.val);
X goto out;
X }
X
@@ -985,18 +1031,18 @@
X printk(KERN_ERR "swap_count: null entry!\n");
X goto out;
X bad_file:
- printk("Bad swap file entry %08lx\n", entry.val);
+ printk(KERN_ERR "swap_count: %s%08lx\n", Bad_file, entry.val);
X goto out;
X bad_offset:
- printk("Bad swap offset entry %08lx\n", entry.val);
+ printk(KERN_ERR "swap_count: %s%08lx\n", Bad_offset, entry.val);
X goto out;
X bad_unused:
- printk("Unused swap offset entry in swap_count %08lx\n", entry.val);
+ printk(KERN_ERR "swap_count: %s%08lx\n", Unused_offset, entry.val);
X goto out;
X }
X
X /*
- * Kernel_lock protects against swap device deletion.
+ * Prior swap_duplicate protects against swap device deletion.
X */
X void get_swaphandle_info(swp_entry_t entry, unsigned long *offset,
X kdev_t *dev, struct inode **swapf)
@@ -1006,23 +1052,22 @@
X
X type = SWP_TYPE(entry);
X if (type >= nr_swapfiles) {
- printk("Internal error: bad swap-device\n");
+ printk(KERN_ERR "rw_swap_page: %s%08lx\n", Bad_file, entry.val);
X return;
X }
X
X p = &swap_info[type];
X *offset = SWP_OFFSET(entry);
- if (*offset >= p->max) {
- printk("rw_swap_page: weirdness\n");
+ if (*offset >= p->max && *offset != 0) {
+ printk(KERN_ERR "rw_swap_page: %s%08lx\n", Bad_offset, entry.val);
X return;
X }
X if (p->swap_map && !p->swap_map[*offset]) {
- printk("VM: Bad swap entry %08lx\n", entry.val);
+ printk(KERN_ERR "rw_swap_page: %s%08lx\n", Unused_offset, entry.val);
X return;
X }
X if (!(p->flags & SWP_USED)) {
- printk(KERN_ERR "rw_swap_page: "
- "Trying to swap to unused swap-device\n");
+ printk(KERN_ERR "rw_swap_page: %s%08lx\n", Unused_file, entry.val);
X return;
X }
X
@@ -1037,8 +1082,8 @@
X }
X
X /*
- * Kernel_lock protects against swap device deletion. Grab an extra
- * reference on the swaphandle so that it dos not become unused.
+ * swap_device_lock prevents swap_map being freed. Don't grab an extra
+ * reference on the swaphandle, it doesn't matter if it becomes unused.
X */
X int valid_swaphandles(swp_entry_t entry, unsigned long *offset)
X {
@@ -1046,20 +1091,23 @@
X unsigned long toff;
X struct swap_info_struct *swapdev = SWP_TYPE(entry) + swap_info;
X
- *offset = SWP_OFFSET(entry);
- toff = *offset = (*offset >> page_cluster) << page_cluster;
+ if (!page_cluster) /* no readahead */
+ return 0;
+ toff = (SWP_OFFSET(entry) >> page_cluster) << page_cluster;
+ if (!toff) /* first page is swap header */
+ toff++, i--;
+ *offset = toff;
X
X swap_device_lock(swapdev);
X do {
X /* Don't read-ahead past the end of the swap area */
X if (toff >= swapdev->max)
X break;
- /* Don't read in bad or busy pages */
+ /* Don't read in free or bad pages */
X if (!swapdev->swap_map[toff])
X break;
X if (swapdev->swap_map[toff] == SWAP_MAP_BAD)
X break;
- swapdev->swap_map[toff]++;
X toff++;
X ret++;
X } while (--i);
diff -u --recursive --new-file v2.4.9/linux/mm/vmalloc.c linux/mm/vmalloc.c
--- v2.4.9/linux/mm/vmalloc.c Tue May 22 19:54:04 2001
+++ linux/mm/vmalloc.c Mon Sep 17 13:16:31 2001
@@ -144,7 +144,6 @@
X int ret;
X
X dir = pgd_offset_k(address);
- flush_cache_all();
X spin_lock(&init_mm.page_table_lock);
X do {
X pmd_t *pmd;
@@ -164,7 +163,6 @@
X ret = 0;
X } while (address && (address < end));
X spin_unlock(&init_mm.page_table_lock);
- flush_tlb_all();
X return ret;
X }
X
diff -u --recursive --new-file v2.4.9/linux/mm/vmscan.c linux/mm/vmscan.c
--- v2.4.9/linux/mm/vmscan.c Mon Aug 27 12:41:49 2001
+++ linux/mm/vmscan.c Sun Sep 23 09:58:51 2001
@@ -21,6 +21,7 @@
X #include <linux/init.h>
X #include <linux/highmem.h>
X #include <linux/file.h>
+#include <linux/compiler.h>
X
X #include <asm/pgalloc.h>
X
@@ -32,21 +33,6 @@
X */
X #define DEF_PRIORITY (6)
X
-#define MAX(a,b) ((a) > (b) ? (a) : (b))
-
-static inline void age_page_up(struct page *page)
-{
- unsigned age = page->age + PAGE_AGE_ADV;
- if (age > PAGE_AGE_MAX)
- age = PAGE_AGE_MAX;
- page->age = age;
-}
-
-static inline void age_page_down(struct page * page)
-{
- page->age /= 2;
-}
-
X /*
X * The swap-out function returns 1 if it successfully
X * scanned all the pages it was asked to (`count').
@@ -56,61 +42,32 @@
X * doesn't count as having freed a page.
X */
X
-/*
- * Estimate whether a zone has enough inactive or free pages..
- */
-static unsigned int zone_inactive_plenty(zone_t *zone)
-{
- unsigned int inactive;
-
- if (!zone->size)
- return 0;
-
- inactive = zone->inactive_dirty_pages;
- inactive += zone->inactive_clean_pages;
- inactive += zone->free_pages;
-
- return (inactive > (zone->size / 3));
-}
-
-static unsigned int zone_free_plenty(zone_t *zone)
-{
- unsigned int free;
-
- free = zone->free_pages;
- free += zone->inactive_clean_pages;
-
- return free > zone->pages_high*2;
-}
-
X /* mm->page_table_lock is held. mmap_sem is not held */
-static void try_to_swap_out(struct mm_struct * mm, struct vm_area_struct* vma, unsigned long address, pte_t * page_table, struct page *page)
+static inline int try_to_swap_out(struct mm_struct * mm, struct vm_area_struct* vma, unsigned long address, pte_t * page_table, struct page *page, zone_t * classzone)
X {
X pte_t pte;
X swp_entry_t entry;
-
- /*
- * If we are doing a zone-specific scan, do not
- * touch pages from zones which don't have a
- * shortage.
- */
- if (zone_inactive_plenty(page->zone))
- return;
+ int right_classzone;
X
X /* Don't look at this pte if it's been accessed recently. */
X if (ptep_test_and_clear_young(page_table)) {
- age_page_up(page);
- return;
+ flush_tlb_page(vma, address);
+ return 0;
X }
X
X if (TryLockPage(page))
- return;
+ return 0;
+
+ right_classzone = 1;
+ if (!memclass(page->zone, classzone))
+ right_classzone = 0;
X
X /* From this point on, the odds are that we're going to
X * nuke this pte, so read and clear the pte. This hook
X * is needed on CPUs which update the accessed and dirty
X * bits in hardware.
X */
+ flush_cache_page(vma, address);
X pte = ptep_get_and_clear(page_table);
X flush_tlb_page(vma, address);
X
@@ -123,22 +80,24 @@
X entry.val = page->index;
X if (pte_dirty(pte))
X set_page_dirty(page);
-set_swap_pte:
X swap_duplicate(entry);
+set_swap_pte:
X set_pte(page_table, swp_entry_to_pte(entry));
X drop_pte:
X mm->rss--;
- if (!page->age)
- deactivate_page(page);
X UnlockPage(page);
- page_cache_release(page);
- return;
+ {
+ int freeable = page_count(page) - !!page->buffers <= 2;
+ page_cache_release(page);
+ return freeable & right_classzone;
+ }
X }
X
X /*
X * Is it a clean page? Then it must be recoverable
X * by just paging it in again, and we can just drop
- * it..
+ * it.. or if it's dirty but has backing store,
+ * just mark the page dirty and drop it.
X *
X * However, this won't actually free any real
X * memory, as the page will just be in the page cache
@@ -148,20 +107,17 @@
X * Basically, this just makes it possible for us to do
X * some real work in the future in "refill_inactive()".
X */
- flush_cache_page(vma, address);
- if (!pte_dirty(pte))
+ if (page->mapping) {
+ if (pte_dirty(pte))
+ set_page_dirty(page);
X goto drop_pte;
-
+ }
X /*
- * Ok, it's really dirty. That means that
- * we should either create a new swap cache
- * entry for it, or we should write it back
- * to its own backing store.
+ * Check PageDirty as well as pte_dirty: page may
+ * have been brought back from swap by swapoff.
X */
- if (page->mapping) {
- set_page_dirty(page);
+ if (!pte_dirty(pte) && !PageDirty(page))
X goto drop_pte;
- }
X
X /*
X * This is a dirty, swappable page. First of all,
@@ -169,23 +125,25 @@
X * we have the swap cache set up to associate the
X * page with that swap entry.
X */
+ swap_list_lock();
X entry = get_swap_page();
- if (!entry.val)
- goto out_unlock_restore; /* No swap space left */
-
- /* Add it to the swap cache and mark it dirty */
- add_to_swap_cache(page, entry);
- set_page_dirty(page);
- goto set_swap_pte;
+ if (entry.val) {
+ /* Add it to the swap cache and mark it dirty */
+ add_to_swap_cache(page, entry);
+ swap_list_unlock();
+ set_page_dirty(page);
+ goto set_swap_pte;
+ }
X
-out_unlock_restore:
+ /* No swap space left */
+ swap_list_unlock();
X set_pte(page_table, pte);
X UnlockPage(page);
- return;
+ return 0;
X }
X
X /* mm->page_table_lock is held. mmap_sem is not held */
-static int swap_out_pmd(struct mm_struct * mm, struct vm_area_struct * vma, pmd_t *dir, unsigned long address, unsigned long end, int count)
+static inline int swap_out_pmd(struct mm_struct * mm, struct vm_area_struct * vma, pmd_t *dir, unsigned long address, unsigned long end, int count, zone_t * classzone)
X {
X pte_t * pte;
X unsigned long pmd_end;
@@ -209,20 +167,22 @@
X struct page *page = pte_page(*pte);
X
X if (VALID_PAGE(page) && !PageReserved(page)) {
- try_to_swap_out(mm, vma, address, pte, page);
- if (!--count)
+ count -= try_to_swap_out(mm, vma, address, pte, page, classzone);
+ if (!count) {
+ address += PAGE_SIZE;
X break;
+ }
X }
X }
X address += PAGE_SIZE;
X pte++;
X } while (address && (address < end));
- mm->swap_address = address + PAGE_SIZE;
+ mm->swap_address = address;
X return count;
X }
X
X /* mm->page_table_lock is held. mmap_sem is not held */
-static inline int swap_out_pgd(struct mm_struct * mm, struct vm_area_struct * vma, pgd_t *dir, unsigned long address, unsigned long end, int count)
+static inline int swap_out_pgd(struct mm_struct * mm, struct vm_area_struct * vma, pgd_t *dir, unsigned long address, unsigned long end, int count, zone_t * classzone)
X {
X pmd_t * pmd;
X unsigned long pgd_end;
@@ -242,7 +202,7 @@
X end = pgd_end;
X
X do {
- count = swap_out_pmd(mm, vma, pmd, address, end, count);
+ count = swap_out_pmd(mm, vma, pmd, address, end, count, classzone);
X if (!count)
X break;
X address = (address + PMD_SIZE) & PMD_MASK;
@@ -252,7 +212,7 @@
X }
X
X /* mm->page_table_lock is held. mmap_sem is not held */
-static int swap_out_vma(struct mm_struct * mm, struct vm_area_struct * vma, unsigned long address, int count)
+static inline int swap_out_vma(struct mm_struct * mm, struct vm_area_struct * vma, unsigned long address, int count, zone_t * classzone)
X {
X pgd_t *pgdir;
X unsigned long end;
@@ -267,7 +227,7 @@
X if (address >= end)
X BUG();
X do {
- count = swap_out_pgd(mm, vma, pgdir, address, end, count);
+ count = swap_out_pgd(mm, vma, pgdir, address, end, count, classzone);
X if (!count)
X break;
X address = (address + PGDIR_SIZE) & PGDIR_MASK;
@@ -276,607 +236,426 @@
X return count;
X }
X
+/* Placeholder for swap_out(): may be updated by fork.c:mmput() */
+struct mm_struct *swap_mm = &init_mm;
+
X /*
- * Returns non-zero if we scanned all `count' pages
+ * Returns remaining count of pages to be swapped out by followup call.
X */
-static int swap_out_mm(struct mm_struct * mm, int count)
+static inline int swap_out_mm(struct mm_struct * mm, int count, int * mmcounter, zone_t * classzone)
X {
X unsigned long address;
X struct vm_area_struct* vma;
X
- if (!count)
- return 1;
- /*
- * Go through process' page directory.
- */
-
X /*
X * Find the proper vm-area after freezing the vma chain
X * and ptes.
X */
X spin_lock(&mm->page_table_lock);
X address = mm->swap_address;
+ if (address == TASK_SIZE || swap_mm != mm) {
+ /* We raced: don't count this mm but try again */
+ ++*mmcounter;
+ goto out_unlock;
+ }
X vma = find_vma(mm, address);
X if (vma) {
X if (address < vma->vm_start)
X address = vma->vm_start;
X
X for (;;) {
- count = swap_out_vma(mm, vma, address, count);
- if (!count)
- goto out_unlock;
+ count = swap_out_vma(mm, vma, address, count, classzone);
X vma = vma->vm_next;
X if (!vma)
X break;
+ if (!count)
+ goto out_unlock;
X address = vma->vm_start;
X }
X }
- /* Reset to 0 when we reach the end of address space */
- mm->swap_address = 0;
+ /* Indicate that we reached the end of address space */
+ mm->swap_address = TASK_SIZE;
X
X out_unlock:
X spin_unlock(&mm->page_table_lock);
- return !count;
-}
-
-#define SWAP_MM_SHIFT 4
-#define SWAP_SHIFT 5
-#define SWAP_MIN 8
-
-static inline int swap_amount(struct mm_struct *mm)
-{
- int nr = mm->rss >> SWAP_SHIFT;
- if (nr < SWAP_MIN) {
- nr = SWAP_MIN;
- if (nr > mm->rss)
- nr = mm->rss;
- }
- return nr;
+ return count;
X }
X
-static void swap_out(unsigned int priority, int gfp_mask)
+static int FASTCALL(swap_out(unsigned int priority, zone_t * classzone, unsigned int gfp_mask, int nr_pages));
+static int swap_out(unsigned int priority, zone_t * classzone, unsigned int gfp_mask, int nr_pages)
X {
X int counter;
- int retval = 0;
- struct mm_struct *mm = current->mm;
-
- /* Always start by trying to penalize the process that is allocating memory */
- if (mm)
- retval = swap_out_mm(mm, swap_amount(mm));
+ struct mm_struct *mm;
X
X /* Then, look at the other mm's */
- counter = (mmlist_nr << SWAP_MM_SHIFT) >> priority;
+ counter = mmlist_nr / priority;
X do {
- struct list_head *p;
+ if (unlikely(current->need_resched)) {
+ __set_current_state(TASK_RUNNING);
+ schedule();
+ }
X
X spin_lock(&mmlist_lock);
- p = init_mm.mmlist.next;
- if (p == &init_mm.mmlist)
- goto empty;
-
- /* Move it to the back of the queue.. */
- list_del(p);
- list_add_tail(p, &init_mm.mmlist);
- mm = list_entry(p, struct mm_struct, mmlist);
+ mm = swap_mm;
+ while (mm->swap_address == TASK_SIZE || mm == &init_mm) {
+ mm->swap_address = 0;
+ mm = list_entry(mm->mmlist.next, struct mm_struct, mmlist);
+ if (mm == swap_mm)
+ goto empty;
+ swap_mm = mm;
+ }
X
X /* Make sure the mm doesn't disappear when we drop the lock.. */
X atomic_inc(&mm->mm_users);
X spin_unlock(&mmlist_lock);
X
- /* Walk about 6% of the address space each time */
- retval |= swap_out_mm(mm, swap_amount(mm));
+ nr_pages = swap_out_mm(mm, nr_pages, &counter, classzone);
+
X mmput(mm);
+
+ if (!nr_pages)
+ return 1;
X } while (--counter >= 0);
- return;
+
+ return 0;
X
X empty:
X spin_unlock(&mmlist_lock);
+ return 0;
X }
X
-
-/**
- * reclaim_page - reclaims one page from the inactive_clean list
- * @zone: reclaim a page from this zone
- *
- * The pages on the inactive_clean can be instantly reclaimed.
- * The tests look impressive, but most of the time we'll grab
- * the first page of the list and exit successfully.
- */
-struct page * reclaim_page(zone_t * zone)
+static int FASTCALL(shrink_cache(int nr_pages, int max_scan, zone_t * classzone, unsigned int gfp_mask));
+static int shrink_cache(int nr_pages, int max_scan, zone_t * classzone, unsigned int gfp_mask)
X {
- struct page * page = NULL;
- struct list_head * page_lru;
- int maxscan;
+ struct list_head * entry;
X
- /*
- * We only need the pagemap_lru_lock if we don't reclaim the page,
- * but we have to grab the pagecache_lock before the pagemap_lru_lock
- * to avoid deadlocks and most of the time we'll succeed anyway.
- */
- spin_lock(&pagecache_lock);
X spin_lock(&pagemap_lru_lock);
- maxscan = zone->inactive_clean_pages;
- while ((page_lru = zone->inactive_clean_list.prev) !=
- &zone->inactive_clean_list && maxscan--) {
- page = list_entry(page_lru, struct page, lru);
-
- /* Wrong page on list?! (list corruption, should not happen) */
- if (!PageInactiveClean(page)) {
- printk("VM: reclaim_page, wrong page on list.\n");
- list_del(page_lru);
- page->zone->inactive_clean_pages--;
- continue;
- }
+ while (max_scan && (entry = inactive_list.prev) != &inactive_list) {
+ struct page * page;
+ swp_entry_t swap;
X
- /* Page is or was in use? Move it to the active list. */
- if (PageReferenced(page) || (!page->buffers && page_count(page) > 1)) {
- del_page_from_inactive_clean_list(page);
- add_page_to_active_list(page);
- page->age = PAGE_AGE_START;
- continue;
- }
-
- /* The page is dirty, or locked, move to inactive_dirty list. */
- if (page->buffers || PageDirty(page) || TryLockPage(page)) {
- del_page_from_inactive_clean_list(page);
- add_page_to_inactive_dirty_list(page);
+ if (unlikely(current->need_resched)) {
+ spin_unlock(&pagemap_lru_lock);
+ __set_current_state(TASK_RUNNING);
+ schedule();
+ spin_lock(&pagemap_lru_lock);
X continue;
X }
X
- /* OK, remove the page from the caches. */
- if (PageSwapCache(page)) {
- __delete_from_swap_cache(page);
- goto found_page;
- }
-
- if (page->mapping) {
- __remove_inode_page(page);
- goto found_page;
- }
-
- /* We should never ever get here. */
- printk(KERN_ERR "VM: reclaim_page, found unknown page\n");
- list_del(page_lru);
- zone->inactive_clean_pages--;
- UnlockPage(page);
- }
- /* Reset page pointer, maybe we encountered an unfreeable page. */
- page = NULL;
- goto out;
-
-found_page:
- memory_pressure++;
- del_page_from_inactive_clean_list(page);
- UnlockPage(page);
- page->age = PAGE_AGE_START;
- if (page_count(page) != 1)
- printk("VM: reclaim_page, found page with count %d!\n",
- page_count(page));
-out:
- spin_unlock(&pagemap_lru_lock);
- spin_unlock(&pagecache_lock);
- return page;
-}
+ page = list_entry(entry, struct page, lru);
X
-/**
- * page_launder - clean dirty inactive pages, move to inactive_clean list
- * @gfp_mask: what operations we are allowed to do
- * @sync: are we allowed to do synchronous IO in emergencies ?
- *
- * When this function is called, we are most likely low on free +
- * inactive_clean pages. Since we want to refill those pages as
- * soon as possible, we'll make two loops over the inactive list,
- * one to move the already cleaned pages to the inactive_clean lists
- * and one to (often asynchronously) clean the dirty inactive pages.
- *
- * In situations where kswapd cannot keep up, user processes will
- * end up calling this function. Since the user process needs to
- * have a page before it can continue with its allocation, we'll
- * do synchronous page flushing in that case.
- *
- * This code used to be heavily inspired by the FreeBSD source code.
- * Thanks go out to Matthew Dillon.
- */
-#define CAN_DO_FS (gfp_mask & __GFP_FS)
-int page_launder(int gfp_mask, int sync)
-{
- int maxscan, cleaned_pages;
- struct list_head * page_lru;
- struct page * page;
-
- cleaned_pages = 0;
-
- /* Will we wait on IO? */
- if (!sync)
- gfp_mask &= ~__GFP_WAIT;
-
- spin_lock(&pagemap_lru_lock);
- maxscan = nr_inactive_dirty_pages >> DEF_PRIORITY;
- while ((page_lru = inactive_dirty_list.prev) != &inactive_dirty_list &&
- maxscan-- > 0) {
- page = list_entry(page_lru, struct page, lru);
+ if (unlikely(!PageInactive(page) && !PageActive(page)))
+ BUG();
X
- /* Wrong page on list?! (list corruption, should not happen) */
- if (!PageInactiveDirty(page)) {
- printk("VM: page_launder, wrong page on list.\n");
- list_del(page_lru);
- nr_inactive_dirty_pages--;
- page->zone->inactive_dirty_pages--;
+ list_del(entry);
+ list_add(entry, &inactive_list);
+ if (PageTestandClearReferenced(page))
X continue;
- }
X
- /* Page is or was in use? Move it to the active list. */
- if (PageReferenced(page) || (!page->buffers && page_count(page) > 1) ||
- page_ramdisk(page)) {
- del_page_from_inactive_dirty_list(page);
- add_page_to_active_list(page);
- page->age = PAGE_AGE_START;
+ max_scan--;
+
+ if (unlikely(!memclass(page->zone, classzone)))
X continue;
- }
X
- /*
- * If this zone has plenty of pages free,
- * don't spend time on cleaning it.
- */
- if (zone_free_plenty(page->zone)) {
- list_del(page_lru);
- list_add(page_lru, &inactive_dirty_list);
+ /* Racy check to avoid trylocking when not worthwhile */
+ if (!page->buffers && page_count(page) != 1)
X continue;
- }
X
X /*
X * The page is locked. IO in progress?
X * Move it to the back of the list.
X */
- if (TryLockPage(page)) {
- list_del(page_lru);
- list_add(page_lru, &inactive_dirty_list);
+ if (unlikely(TryLockPage(page)))
X continue;
+
+ if (PageDirty(page) && is_page_cache_freeable(page)) {
+ /*
+ * It is not critical here to write it only if
+		 * the page is unmapped because any direct writer
+ * like O_DIRECT would set the PG_dirty bitflag
+		 * on the physical page after having successfully
+ * pinned it and after the I/O to the page is finished,
+ * so the direct writes to the page cannot get lost.
+ */
+ int (*writepage)(struct page *);
+
+ writepage = page->mapping->a_ops->writepage;
+ if ((gfp_mask & __GFP_FS) && writepage) {
+ ClearPageDirty(page);
+ page_cache_get(page);
+ spin_unlock(&pagemap_lru_lock);
+
+ writepage(page);
+ page_cache_release(page);
+
+ spin_lock(&pagemap_lru_lock);
+ continue;
+ }
X }
X
X /*
- * Dirty swap-cache page? Write it out if
- * last copy..
+ * If the page has buffers, try to free the buffer mappings
+ * associated with this page. If we succeed we try to free
+ * the page as well.
X */
- if (PageDirty(page)) {
- int (*writepage)(struct page *) = page->mapping->a_ops->writepage;
+ if (page->buffers) {
+ spin_unlock(&pagemap_lru_lock);
+
+			/* avoid freeing a locked page */
+ page_cache_get(page);
X
- if (!writepage)
- goto page_active;
+ if (try_to_free_buffers(page, gfp_mask)) {
+ if (!page->mapping) {
+ /*
+ * Account we successfully freed a page
+ * of buffer cache.
+ */
+ atomic_dec(&buffermem_pages);
+
+ /*
+ * We must not allow an anon page
+ * with no buffers to be visible on
+ * the LRU, so we unlock the page after
+ * taking the lru lock
+ */
+ spin_lock(&pagemap_lru_lock);
+ UnlockPage(page);
+ __lru_cache_del(page);
X
- /* Can't do it? Move it to the back of the list */
- if (!CAN_DO_FS) {
- list_del(page_lru);
- list_add(page_lru, &inactive_dirty_list);
+ /* effectively free the page here */
+ page_cache_release(page);
+
+ if (--nr_pages)
+ continue;
+ break;
+ } else {
+ /*
+ * The page is still in pagecache so undo the stuff
+ * before the try_to_free_buffers since we've not
+ * finished and we can now try the next step.
+ */
+ page_cache_release(page);
+
+ spin_lock(&pagemap_lru_lock);
+ }
+ } else {
+ /* failed to drop the buffers so stop here */
X UnlockPage(page);
+ page_cache_release(page);
+
+ spin_lock(&pagemap_lru_lock);
X continue;
X }
+ }
X
- /* OK, do a physical asynchronous write to swap. */
- ClearPageDirty(page);
- page_cache_get(page);
- spin_unlock(&pagemap_lru_lock);
+ if (unlikely(!page->mapping))
+ BUG();
X
- writepage(page);
- page_cache_release(page);
+ if (unlikely(!spin_trylock(&pagecache_lock))) {
+ /* we hold the page lock so the page cannot go away from under us */
+ spin_unlock(&pagemap_lru_lock);
X
- /* And re-start the thing.. */
+ spin_lock(&pagecache_lock);
X spin_lock(&pagemap_lru_lock);
- continue;
X }
X
X /*
- * If the page has buffers, try to free the buffer mappings
- * associated with this page. If we succeed we either free
- * the page (in case it was a buffercache only page) or we
- * move the page to the inactive_clean list.
- *
- * On the first round, we should free all previously cleaned
- * buffer pages
+ * this is the non-racy check, it is critical to check
+ * PageDirty _after_ we made sure the page is freeable
+ * so not in use by anybody.
X */
- if (page->buffers) {
- int clearedbuf;
- int freed_page = 0;
+ if (!is_page_cache_freeable(page) || PageDirty(page)) {
+ spin_unlock(&pagecache_lock);
+ UnlockPage(page);
+ continue;
+ }
X
- /*
- * Since we might be doing disk IO, we have to
- * drop the spinlock and take an extra reference
- * on the page so it doesn't go away from under us.
- */
- del_page_from_inactive_dirty_list(page);
- page_cache_get(page);
- spin_unlock(&pagemap_lru_lock);
+ /* point of no return */
+ if (likely(!PageSwapCache(page))) {
+ swap.val = 0;
+ __remove_inode_page(page);
+ } else {
+ swap.val = page->index;
+ __delete_from_swap_cache(page);
+ }
+ spin_unlock(&pagecache_lock);
X
- /* Try to free the page buffers. */
- clearedbuf = try_to_free_buffers(page, gfp_mask);
+ __lru_cache_del(page);
X
- /*
- * Re-take the spinlock. Note that we cannot
- * unlock the page yet since we're still
- * accessing the page_struct here...
- */
+ if (unlikely(swap.val != 0)) {
+ /* must drop lru lock if getting swap_list lock */
+ spin_unlock(&pagemap_lru_lock);
+ swap_free(swap);
X spin_lock(&pagemap_lru_lock);
+ }
X
- /* The buffers were not freed. */
- if (!clearedbuf) {
- add_page_to_inactive_dirty_list(page);
-
- /* The page was only in the buffer cache. */
- } else if (!page->mapping) {
- atomic_dec(&buffermem_pages);
- freed_page = 1;
- cleaned_pages++;
-
- /* The page has more users besides the cache and us. */
- } else if (page_count(page) > 2) {
- add_page_to_active_list(page);
-
- /* OK, we "created" a freeable page. */
- } else /* page->mapping && page_count(page) == 2 */ {
- add_page_to_inactive_clean_list(page);
- cleaned_pages++;
- }
+ UnlockPage(page);
X
- /*
- * Unlock the page and drop the extra reference.
- * We can only do it here because we are accessing
- * the page struct above.
- */
- UnlockPage(page);
- page_cache_release(page);
+ /* effectively free the page here */
+ page_cache_release(page);
X
+ if (--nr_pages)
X continue;
- } else if (page->mapping && !PageDirty(page)) {
- /*
- * If a page had an extra reference in
- * deactivate_page(), we will find it here.
- * Now the page is really freeable, so we
- * move it to the inactive_clean list.
- */
- del_page_from_inactive_dirty_list(page);
- add_page_to_inactive_clean_list(page);
- UnlockPage(page);
- cleaned_pages++;
- } else {
-page_active:
- /*
- * OK, we don't know what to do with the page.
- * It's no use keeping it here, so we move it to
- * the active list.
- */
- del_page_from_inactive_dirty_list(page);
- add_page_to_active_list(page);
- UnlockPage(page);
- }
+ break;
X }
X spin_unlock(&pagemap_lru_lock);
X
- /* Return the number of pages moved to the inactive_clean list. */
- return cleaned_pages;
+ return nr_pages;
X }
X
-/**
- * refill_inactive_scan - scan the active list and find pages to deactivate
- * @priority: the priority at which to scan
+/*
+ * This moves pages from the active list to
+ * the inactive list.
X *
- * This function will scan a portion of the active list to find
- * unused pages, those pages will then be moved to the inactive list.
+ * We move them the other way when we see the
+ * reference bit on the page.
X */
-static int refill_inactive_scan(unsigned int priority)
+static void refill_inactive(int nr_pages)
X {
- struct list_head * page_lru;
- struct page * page;
- int maxscan = nr_active_pages >> priority;
- int page_active = 0;
- int nr_deactivated = 0;
+ struct list_head * entry;
X
- /* Take the lock while messing with the list... */
X spin_lock(&pagemap_lru_lock);
- while (maxscan-- > 0 && (page_lru = active_list.prev) != &active_list) {
- page = list_entry(page_lru, struct page, lru);
+ entry = active_list.prev;
+ while (nr_pages-- && entry != &active_list) {
+ struct page * page;
X
- /* Wrong page on list?! (list corruption, should not happen) */
- if (!PageActive(page)) {
- printk("VM: refill_inactive, wrong page on list.\n");
- list_del(page_lru);
- nr_active_pages--;
+ page = list_entry(entry, struct page, lru);
+ entry = entry->prev;
+ if (PageTestandClearReferenced(page)) {
+ list_del(&page->lru);
+ list_add(&page->lru, &active_list);
X continue;
X }
X
- /*
- * Do not deactivate pages from zones which
- * have plenty inactive pages.
- */
-
- if (zone_inactive_plenty(page->zone)) {
- page_active = 1;
- goto skip_page;
- }
-
- /* Do aging on the pages. */
- if (PageTestandClearReferenced(page)) {
- age_page_up(page);
- page_active = 1;
- } else {
- age_page_down(page);
- /*
- * Since we don't hold a reference on the page
- * ourselves, we have to do our test a bit more
- * strict then deactivate_page(). This is needed
- * since otherwise the system could hang shuffling
- * unfreeable pages from the active list to the
- * inactive_dirty list and back again...
- *
- * SUBTLE: we can have buffer pages with count 1.
- */
- if (page->age == 0 && page_count(page) <=
- (page->buffers ? 2 : 1)) {
- deactivate_page_nolock(page);
- page_active = 0;
- } else {
- page_active = 1;
- }
- }
- /*
- * If the page is still on the active list, move it
- * to the other end of the list. Otherwise we exit if
- * we have done enough work.
- */
- if (page_active || PageActive(page)) {
-skip_page:
- list_del(page_lru);
- list_add(page_lru, &active_list);
- } else {
- nr_deactivated++;
- }
+ del_page_from_active_list(page);
+ add_page_to_inactive_list(page);
X }
X spin_unlock(&pagemap_lru_lock);
-
- return nr_deactivated;
X }
X
-/*
- * Check if there are zones with a severe shortage of free pages,
- * or if all zones have a minor shortage.
- */
-int free_shortage(void)
+static int FASTCALL(shrink_caches(int priority, zone_t * classzone, unsigned int gfp_mask, int nr_pages));
+static int shrink_caches(int priority, zone_t * classzone, unsigned int gfp_mask, int nr_pages)
X {
- pg_data_t *pgdat;
- unsigned int global_free = 0;
- unsigned int global_target = freepages.high;
-
- /* Are we low on free pages anywhere? */
- pgdat = pgdat_list;
- do {
- int i;
- for(i = 0; i < MAX_NR_ZONES; i++) {
- zone_t *zone = pgdat->node_zones+ i;
- unsigned int free;
+ int max_scan = nr_inactive_pages / priority;
X
- if (!zone->size)
- continue;
+ nr_pages -= kmem_cache_reap(gfp_mask);
+ if (nr_pages <= 0)
+ return 0;
X
- free = zone->free_pages;
- free += zone->inactive_clean_pages;
+ /* Do we want to age the active list? */
+ if (nr_inactive_pages < nr_active_pages*2)
+ refill_inactive(nr_pages);
X
- /* Local shortage? */
- if (free < zone->pages_low)
- return 1;
+ nr_pages = shrink_cache(nr_pages, max_scan, classzone, gfp_mask);
+ if (nr_pages <= 0)
+ return 0;
X
- global_free += free;
- }
- pgdat = pgdat->node_next;
- } while (pgdat);
+ shrink_dcache_memory(priority, gfp_mask);
+ shrink_icache_memory(priority, gfp_mask);
X
- /* Global shortage? */
- return global_free < global_target;
+ return nr_pages;
X }
X
-/*
- * Are we low on inactive pages globally or in any zone?
- */
-int inactive_shortage(void)
+int try_to_free_pages(zone_t * classzone, unsigned int gfp_mask, unsigned int order)
X {
- pg_data_t *pgdat;
- unsigned int global_target = freepages.high + inactive_target;
- unsigned int global_incative = 0;
+ int priority = DEF_PRIORITY;
+ int ret = 0;
X
- pgdat = pgdat_list;
X do {
- int i;
- for(i = 0; i < MAX_NR_ZONES; i++) {
- zone_t *zone = pgdat->node_zones + i;
- unsigned int inactive;
+ int nr_pages = SWAP_CLUSTER_MAX;
+ nr_pages = shrink_caches(priority, classzone, gfp_mask, nr_pages);
+ if (nr_pages <= 0)
+ return 1;
X
- if (!zone->size)
- continue;
+ ret |= swap_out(priority, classzone, gfp_mask, SWAP_CLUSTER_MAX << 2);
+ } while (--priority);
X
- inactive = zone->inactive_dirty_pages;
- inactive += zone->inactive_clean_pages;
- inactive += zone->free_pages;
+ return ret;
+}
X
- /* Local shortage? */
- if (inactive < zone->pages_high)
- return 1;
+DECLARE_WAIT_QUEUE_HEAD(kswapd_wait);
X
- global_incative += inactive;
- }
- pgdat = pgdat->node_next;
- } while (pgdat);
+static int check_classzone_need_balance(zone_t * classzone)
+{
+ zone_t * first_classzone;
X
- /* Global shortage? */
- return global_incative < global_target;
+ first_classzone = classzone->zone_pgdat->node_zones;
+ while (classzone >= first_classzone) {
+ if (classzone->free_pages > classzone->pages_high)
+ return 0;
+ classzone--;
+ }
+ return 1;
X }
X
-/*
- * Loop until we are no longer under an inactive or free
- * shortage. Return 1 on success, 0 if we failed to get
- * there even after "maxtry" loops.
- */
-#define INACTIVE_SHORTAGE 1
-#define FREE_SHORTAGE 2
-#define GENERAL_SHORTAGE 4
-static int do_try_to_free_pages(unsigned int gfp_mask, int user)
-{
- /* Always walk at least the active queue when called */
- int shortage = INACTIVE_SHORTAGE;
- int maxtry;
-
- maxtry = 1 << DEF_PRIORITY;
- do {
- /*
- * If needed, we move pages from the active list
- * to the inactive list.
- */
- if (shortage & INACTIVE_SHORTAGE) {
- /* Walk the VM space for a bit.. */
- swap_out(DEF_PRIORITY, gfp_mask);
+static int kswapd_balance_pgdat(pg_data_t * pgdat)
+{
+ int need_more_balance = 0, i;
+ zone_t * zone;
X
- /* ..and refill the inactive list */
- refill_inactive_scan(DEF_PRIORITY);
+ for (i = pgdat->nr_zones-1; i >= 0; i--) {
+ zone = pgdat->node_zones + i;
+ if (unlikely(current->need_resched))
+ schedule();
+ if (!zone->need_balance)
+ continue;
+ if (!try_to_free_pages(zone, GFP_KSWAPD, 0)) {
+ zone->need_balance = 0;
+ __set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(HZ*5);
+ continue;
X }
+ if (check_classzone_need_balance(zone))
+ need_more_balance = 1;
+ else
+ zone->need_balance = 0;
+ }
X
- /*
- * If we're low on free pages, move pages from the
- * inactive_dirty list to the inactive_clean list.
- *
- * Usually bdflush will have pre-cleaned the pages
- * before we get around to moving them to the other
- * list, so this is a relatively cheap operation.
- */
- if (shortage & FREE_SHORTAGE)
- page_launder(gfp_mask, user);
-
- /*
- * Reclaim unused slab cache if we were short on memory.
- */
- if (shortage & GENERAL_SHORTAGE) {
- shrink_dcache_memory(DEF_PRIORITY, gfp_mask);
- shrink_icache_memory(DEF_PRIORITY, gfp_mask);
+ return need_more_balance;
+}
X
- kmem_cache_reap(gfp_mask);
- }
+static void kswapd_balance(void)
+{
+ int need_more_balance;
+ pg_data_t * pgdat;
X
- if (current->need_resched) {
- __set_current_state(TASK_RUNNING);
- schedule();
- }
+ do {
+ need_more_balance = 0;
+ pgdat = pgdat_list;
+ do
+ need_more_balance |= kswapd_balance_pgdat(pgdat);
+ while ((pgdat = pgdat->node_next));
+ } while (need_more_balance);
+}
X
- shortage = 0;
- if (inactive_shortage())
- shortage |= INACTIVE_SHORTAGE | GENERAL_SHORTAGE;
- if (free_shortage())
- shortage |= FREE_SHORTAGE | GENERAL_SHORTAGE;
+static int kswapd_can_sleep_pgdat(pg_data_t * pgdat)
+{
+ zone_t * zone;
+ int i;
X
- if (--maxtry <= 0)
- break;
- } while (shortage);
+ for (i = pgdat->nr_zones-1; i >= 0; i--) {
+ zone = pgdat->node_zones + i;
+ if (!zone->need_balance)
+ continue;
+ return 0;
+ }
X
- return !shortage;
+ return 1;
X }
X
-DECLARE_WAIT_QUEUE_HEAD(kswapd_wait);
-DECLARE_WAIT_QUEUE_HEAD(kswapd_done);
+static int kswapd_can_sleep(void)
+{
+ pg_data_t * pgdat;
+
+ pgdat = pgdat_list;
+ do {
+ if (kswapd_can_sleep_pgdat(pgdat))
+ continue;
+ return 0;
+ } while ((pgdat = pgdat->node_next));
+
+ return 1;
+}
X
X /*
X * The background pageout daemon, started as a kernel thread
@@ -894,6 +673,7 @@
X int kswapd(void *unused)
X {
X struct task_struct *tsk = current;
+ DECLARE_WAITQUEUE(wait, tsk);
X
X daemonize();
X strcpy(tsk->comm, "kswapd");
@@ -917,107 +697,31 @@
SHAR_EOF
true || echo 'restore of patch-2.4.10 failed'
fi
echo 'End of part 193'
echo 'File patch-2.4.10 is continued in part 194'
echo "194" > _shar_seq_.tmp
exit 0
#!/bin/sh -x
# this is part 194 of a 197-part archive
# do not concatenate these parts, unpack them in order with /bin/sh
# file patch-2.4.10 continued
if test ! -r _shar_seq_.tmp; then
echo 'Please unpack part 1 first!'
exit 1
fi
(read Scheck
if test "$Scheck" != 194; then
echo "Please unpack part $Scheck next!"
exit 1
else
exit 0
fi
) < _shar_seq_.tmp || exit 1
if test ! -f _shar_wnt_.tmp; then
echo 'x - still skipping patch-2.4.10'
else
echo 'x - continuing with patch-2.4.10'
sed 's/^X//' << 'SHAR_EOF' >> 'patch-2.4.10' &&
X * Kswapd main loop.
X */
X for (;;) {
- static long recalc = 0;
-
- /* Once a second ... */
- if (time_after(jiffies, recalc + HZ)) {
- recalc = jiffies;
-
- /* Recalculate VM statistics. */
- recalculate_vm_stats();
- }
-
- if (!do_try_to_free_pages(GFP_KSWAPD, 1)) {
- if (out_of_memory())
- oom_kill();
- continue;
- }
-
- run_task_queue(&tq_disk);
- interruptible_sleep_on_timeout(&kswapd_wait, HZ);
- }
-}
-
-void wakeup_kswapd(void)
-{
- if (waitqueue_active(&kswapd_wait))
- wake_up_interruptible(&kswapd_wait);
-}
-
-/*
- * Called by non-kswapd processes when they want more
- * memory but are unable to sleep on kswapd because
- * they might be holding some IO locks ...
- */
-int try_to_free_pages(unsigned int gfp_mask)
-{
- int ret = 1;
-
- if (gfp_mask & __GFP_WAIT) {
- current->flags |= PF_MEMALLOC;
- ret = do_try_to_free_pages(gfp_mask, 1);
- current->flags &= ~PF_MEMALLOC;
- }
-
- return ret;
-}
-
-DECLARE_WAIT_QUEUE_HEAD(kreclaimd_wait);
-/*
- * Kreclaimd will move pages from the inactive_clean list to the
- * free list, in order to keep atomic allocations possible under
- * all circumstances.
- */
-int kreclaimd(void *unused)
-{
- struct task_struct *tsk = current;
- pg_data_t *pgdat;
-
- daemonize();
- strcpy(tsk->comm, "kreclaimd");
- sigfillset(&tsk->blocked);
- current->flags |= PF_MEMALLOC;
+ __set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&kswapd_wait, &wait);
X
- while (1) {
+ mb();
+ if (kswapd_can_sleep())
+ schedule();
X
- /*
- * We sleep until someone wakes us up from
- * page_alloc.c::__alloc_pages().
- */
- interruptible_sleep_on(&kreclaimd_wait);
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&kswapd_wait, &wait);
X
X /*
- * Move some pages from the inactive_clean lists to
- * the free lists, if it is needed.
+ * If we actually get into a low-memory situation,
+ * the processes needing more memory will wake us
+ * up on a more timely basis.
X */
- pgdat = pgdat_list;
- do {
- int i;
- for(i = 0; i < MAX_NR_ZONES; i++) {
- zone_t *zone = pgdat->node_zones + i;
- if (!zone->size)
- continue;
-
- while (zone->free_pages < zone->pages_low) {
- struct page * page;
- page = reclaim_page(zone);
- if (!page)
- break;
- __free_page(page);
- }
- }
- pgdat = pgdat->node_next;
- } while (pgdat);
+ kswapd_balance();
+ run_task_queue(&tq_disk);
X }
X }
X
-
X static int __init kswapd_init(void)
X {
- printk("Starting kswapd v1.8\n");
+ printk("Starting kswapd\n");
X swap_setup();
X kernel_thread(kswapd, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGNAL);
- kernel_thread(kreclaimd, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGNAL);
X return 0;
X }
X
diff -u --recursive --new-file v2.4.9/linux/net/appletalk/ddp.c linux/net/appletalk/ddp.c
--- v2.4.9/linux/net/appletalk/ddp.c Mon Aug 27 12:41:49 2001
+++ linux/net/appletalk/ddp.c Mon Sep 10 07:57:00 2001
@@ -752,7 +752,7 @@
X
X /* ioctl calls. Shouldn't even need touching */
X /* Device configuration ioctl calls */
-int atif_ioctl(int cmd, void *arg)
+static int atif_ioctl(int cmd, void *arg)
X {
X static char aarp_mcast[6] = {0x09, 0x00, 0x00, 0xFF, 0xFF, 0xFF};
X struct ifreq atreq;
@@ -1390,7 +1390,7 @@
X
X /* Trim buffer in case of stray trailing data */
X origlen = skb->len;
- skb_trim(skb, min(unsigned int, skb->len, ddphv.deh_len));
+ skb_trim(skb, min_t(unsigned int, skb->len, ddphv.deh_len));
X
X /*
X * Size check to see if ddp->deh_len was crap
@@ -1455,7 +1455,7 @@
X }
X
X /* Fix up skb->len field */
- skb_trim(skb, min(unsigned int, origlen, rt->dev->hard_header_len +
+ skb_trim(skb, min_t(unsigned int, origlen, rt->dev->hard_header_len +
X ddp_dl->header_length + ddphv.deh_len));
X
X /* Mend the byte order */
@@ -1855,7 +1855,15 @@
X case SIOCDIFADDR:
X case SIOCSARP: /* proxy AARP */
X case SIOCDARP: /* proxy AARP */
- return atif_ioctl(cmd, (void *)arg);
+ {
+ int ret;
+
+ rtnl_lock();
+ ret = atif_ioctl(cmd, (void *)arg);
+ rtnl_unlock();
+
+ return ret;
+ }
X /* Physical layer ioctl calls */
X case SIOCSIFLINK:
X case SIOCGIFHWADDR:
diff -u --recursive --new-file v2.4.9/linux/net/ax25/af_ax25.c linux/net/ax25/af_ax25.c
--- v2.4.9/linux/net/ax25/af_ax25.c Mon Aug 27 12:41:49 2001
+++ linux/net/ax25/af_ax25.c Thu Sep 13 17:16:23 2001
@@ -751,7 +751,7 @@
X return -EFAULT;
X
X valptr = (void *) &val;
- length = min(unsigned int, maxlen, sizeof(int));
+ length = min_t(unsigned int, maxlen, sizeof(int));
X
X switch (optname) {
X case AX25_WINDOW:
@@ -803,7 +803,7 @@
X
X if (ax25_dev != NULL && ax25_dev->dev != NULL) {
X strncpy(devname, ax25_dev->dev->name, IFNAMSIZ);
- length = min(unsigned int, strlen(ax25_dev->dev->name)+1, maxlen);
+ length = min_t(unsigned int, strlen(ax25_dev->dev->name)+1, maxlen);
X devname[length-1] = '\0';
X } else {
X *devname = '\0';
@@ -1841,7 +1841,7 @@
X EXPORT_SYMBOL(null_ax25_address);
X EXPORT_SYMBOL(ax25_display_timer);
X
-static const char banner[] __initdata = KERN_INFO "NET4: G4KLX/GW4PTS AX.25 for Linux. Version 0.37 for Linux NET4.0\n";
+static char banner[] __initdata = KERN_INFO "NET4: G4KLX/GW4PTS AX.25 for Linux. Version 0.37 for Linux NET4.0\n";
X
X static int __init ax25_init(void)
X {
diff -u --recursive --new-file v2.4.9/linux/net/ax25/ax25_in.c linux/net/ax25/ax25_in.c
--- v2.4.9/linux/net/ax25/ax25_in.c Tue Jul 3 17:08:22 2001
+++ linux/net/ax25/ax25_in.c Sun Sep 9 10:52:35 2001
@@ -431,7 +431,7 @@
X }
X } else {
X /* Reverse the source SABM's path */
- memcpy(&ax25->digipeat, &reverse_dp, sizeof(ax25_digi));
+ memcpy(ax25->digipeat, &reverse_dp, sizeof(ax25_digi));
X }
X
X if ((*skb->data & ~AX25_PF) == AX25_SABME) {
diff -u --recursive --new-file v2.4.9/linux/net/bluetooth/af_bluetooth.c linux/net/bluetooth/af_bluetooth.c
--- v2.4.9/linux/net/bluetooth/af_bluetooth.c Tue Jul 3 17:08:22 2001
+++ linux/net/bluetooth/af_bluetooth.c Fri Sep 7 09:28:38 2001
@@ -25,8 +25,9 @@
X /*
X * BlueZ Bluetooth address family and sockets.
X *
- * $Id: af_bluetooth.c,v 1.1 2001/06/01 08:12:11 davem Exp $
+ * $Id: af_bluetooth.c,v 1.4 2001/07/05 18:42:44 maxk Exp $
X */
+#define VERSION "1.1"
X
X #include <linux/config.h>
X #include <linux/module.h>
@@ -36,7 +37,7 @@
X #include <linux/kernel.h>
X #include <linux/major.h>
X #include <linux/sched.h>
-#include <linux/malloc.h>
+#include <linux/slab.h>
X #include <linux/skbuff.h>
X #include <linux/init.h>
X #include <linux/proc_fs.h>
@@ -129,7 +130,7 @@
X int bluez_init(void)
X {
X INF("BlueZ HCI Core ver %s Copyright (C) 2000,2001 Qualcomm Inc",
- BLUEZ_VER);
+ VERSION);
X INF("Written 2000,2001 by Maxim Krasnyansky <ma...@qualcomm.com>");
X
X proc_mkdir("bluetooth", NULL);
@@ -161,4 +162,7 @@
X #ifdef MODULE
X module_init(bluez_init);
X module_exit(bluez_cleanup);
+
+MODULE_AUTHOR("Maxim Krasnyansky <ma...@qualcomm.com>");
+MODULE_DESCRIPTION("BlueZ HCI Core ver " VERSION);
X #endif
diff -u --recursive --new-file v2.4.9/linux/net/bluetooth/hci_core.c linux/net/bluetooth/hci_core.c
--- v2.4.9/linux/net/bluetooth/hci_core.c Tue Jul 3 17:08:22 2001
+++ linux/net/bluetooth/hci_core.c Sun Sep 9 10:52:35 2001
@@ -25,7 +25,7 @@
X /*
X * BlueZ HCI Core.
X *
- * $Id: hci_core.c,v 1.2 2001/06/01 16:57:03 davem Exp $
+ * $Id: hci_core.c,v 1.22 2001/08/03 04:19:50 maxk Exp $
X */
X
X #include <linux/config.h>
@@ -36,7 +36,7 @@
X #include <linux/kernel.h>
X #include <linux/major.h>
X #include <linux/sched.h>
-#include <linux/malloc.h>
+#include <linux/slab.h>
X #include <linux/poll.h>
X #include <linux/fcntl.h>
X #include <linux/init.h>
@@ -47,6 +47,7 @@
X
X #include <asm/system.h>
X #include <asm/uaccess.h>
+#include <asm/unaligned.h>
X
X #include <net/bluetooth/bluetooth.h>
X #include <net/bluetooth/bluez.h>
@@ -103,7 +104,7 @@
X return notifier_chain_unregister(&hci_dev_notifier, nb);
X }
X
-static __inline__ void hci_notify(struct hci_dev *hdev, int event)
+static inline void hci_notify(struct hci_dev *hdev, int event)
X {
X notifier_call_chain(&hci_dev_notifier, event, hdev);
X }
@@ -196,14 +197,14 @@
X }
X
X /* --------- BaseBand connections --------- */
-static struct hci_conn *hci_conn_add(struct hci_dev *hdev, __u16 handle, bdaddr_t *dst)
+static struct hci_conn *hci_conn_add(struct hci_dev *hdev, __u16 handle, __u8 type, bdaddr_t *dst)
X {
X struct hci_conn *conn;
X
X DBG("%s handle %d dst %s", hdev->name, handle, batostr(dst));
X
- if (handle > HCI_MAX_CONN) {
- ERR("%s BUG handle %d is to large", hdev->name, handle);
+ if ( conn_hash_lookup(&hdev->conn_hash, handle)) {
+ ERR("%s handle 0x%x already exists", hdev->name, handle);
X return NULL;
X }
X
@@ -213,33 +214,28 @@
X
X bacpy(&conn->dst, dst);
X conn->handle = handle;
+ conn->type = type;
X conn->hdev = hdev;
X
- skb_queue_head_init(&conn->acl_q);
- skb_queue_head_init(&conn->sco_q);
+ skb_queue_head_init(&conn->data_q);
X
- if (conn_hash_add(&hdev->conn_hash, handle, conn)) {
- hci_dev_hold(hdev);
- return conn;
- } else {
- kfree(conn);
- return NULL;
- }
+ hci_dev_hold(hdev);
+ conn_hash_add(&hdev->conn_hash, handle, conn);
+
+ return conn;
X }
X
X static int hci_conn_del(struct hci_dev *hdev, struct hci_conn *conn)
X {
X DBG("%s conn %p handle %d", hdev->name, conn, conn->handle);
X
- if (conn_hash_del(&hdev->conn_hash, conn->handle)) {
- hci_dev_put(hdev);
+ conn_hash_del(&hdev->conn_hash, conn);
+ hci_dev_put(hdev);
X
- /* Unacked frames */
- hdev->acl_cnt += conn->acl_sent;
- }
+ /* Unacked frames */
+ hdev->acl_cnt += conn->sent;
X
- bluez_skb_queue_purge(&conn->acl_q);
- bluez_skb_queue_purge(&conn->sco_q);
+ skb_queue_purge(&conn->data_q);
X
X kfree(conn);
X return 0;
@@ -248,22 +244,28 @@
X /* Drop all connection on the device */
X static void hci_conn_hash_flush(struct hci_dev *hdev)
X {
- struct hci_proto *hp = GET_HPROTO(HCI_PROTO_L2CAP);
X struct conn_hash *h = &hdev->conn_hash;
- int i;
+ struct hci_proto *hp;
+ struct list_head *p;
X
X DBG("hdev %s", hdev->name);
X
- for (i = 0; i < HCI_MAX_CONN; i++) {
- struct hci_conn *conn;
+ p = h->list.next;
+ while (p != &h->list) {
+ struct hci_conn *c;
X
- if (!(conn = conn_hash_lookup(h, i)))
- continue;
+ c = list_entry(p, struct hci_conn, list);
+ p = p->next;
X
- if (hp && hp->disconn_ind)
- hp->disconn_ind(conn, 0x16);
-
- hci_conn_del(hdev, conn);
+ if (c->type == ACL_LINK) {
+ /* ACL link notify L2CAP layer */
+ if ((hp = GET_HPROTO(HCI_PROTO_L2CAP)) && hp->disconn_ind)
+ hp->disconn_ind(c, 0x16);
+ } else {
+ /* SCO link (no notification) */
+ }
+
+ hci_conn_del(hdev, c);
X }
X }
X
@@ -294,9 +296,14 @@
X inquiry_cache_unlock_bh(cache);
X
X bacpy(&cc.bdaddr, bdaddr);
- cc.pkt_type = __cpu_to_le16(HCI_DM1 | HCI_DM3 | HCI_DM5 | HCI_DH1 | HCI_DH3 | HCI_DH5);
+ cc.pkt_type = __cpu_to_le16(hdev->pkt_type);
X cc.clock_offset = __cpu_to_le16(clock_offset);
- cc.role_switch = 0;
+
+ if (lmp_rswitch_capable(hdev))
+ cc.role_switch = 0x01;
+ else
+ cc.role_switch = 0x00;
+
X hci_send_cmd(hdev, OGF_LINK_CTL, OCF_CREATE_CONN, CREATE_CONN_CP_SIZE, &cc);
X
X return 0;
@@ -316,17 +323,17 @@
X }
X
X /* --------- HCI request handling ------------ */
-static __inline__ void hci_req_lock(struct hci_dev *hdev)
+static inline void hci_req_lock(struct hci_dev *hdev)
X {
X down(&hdev->req_lock);
X }
X
-static __inline__ void hci_req_unlock(struct hci_dev *hdev)
+static inline void hci_req_unlock(struct hci_dev *hdev)
X {
X up(&hdev->req_lock);
X }
X
-static __inline__ void hci_req_complete(struct hci_dev *hdev, int result)
+static inline void hci_req_complete(struct hci_dev *hdev, int result)
X {
X DBG("%s result 0x%2.2x", hdev->name, result);
X
@@ -337,7 +344,7 @@
X }
X }
X
-static __inline__ void hci_req_cancel(struct hci_dev *hdev, int err)
+static inline void hci_req_cancel(struct hci_dev *hdev, int err)
X {
X DBG("%s err 0x%2.2x", hdev->name, err);
X
@@ -392,7 +399,7 @@
X return err;
X }
X
-static __inline__ int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
+static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
X unsigned long opt, __u32 timeout)
X {
X int ret;
@@ -423,6 +430,9 @@
X
X /* Mandatory initialization */
X
+ /* Read Local Supported Features */
+ hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_LOCAL_FEATURES, 0, NULL);
+
X /* Read Buffer Size (ACL mtu, max pkt, etc.) */
X hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BUFFER_SIZE, 0, NULL);
X
@@ -433,10 +443,9 @@
X
X /* Clear Event Filters */
X ec.flt_type = FLT_CLEAR_ALL;
- ec.cond_type = 0;
- hci_send_cmd(hdev, OGF_HOST_CTL, OCF_SET_EVENT_FLT, SET_EVENT_FLT_CP_SIZE, &ec);
+ hci_send_cmd(hdev, OGF_HOST_CTL, OCF_SET_EVENT_FLT, 1, &ec);
X
- /* Page timeout ~ 20 secs */
+ /* Page timeout ~20 secs */
X param = __cpu_to_le16(0x8000);
X hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_PG_TIMEOUT, 2, &param);
X
@@ -479,7 +488,7 @@
X hci_send_cmd(hdev, OGF_LINK_CTL, OCF_INQUIRY, INQUIRY_CP_SIZE, &ic);
X }
X
-/* Open HCI device */
+/* HCI ioctl helpers */
X int hci_dev_open(__u16 dev)
X {
X struct hci_dev *hdev;
@@ -497,24 +506,43 @@
X goto done;
X }
X
- /* Initialize device */
X if (hdev->open(hdev)) {
X ret = -EIO;
X goto done;
X }
X
- atomic_set(&hdev->cmd_cnt, 1);
- hdev->cmd_sent= NULL;
- hdev->flags |= HCI_INIT;
+ if (hdev->flags & HCI_NORMAL) {
+ atomic_set(&hdev->cmd_cnt, 1);
+ hdev->flags |= HCI_INIT;
X
- __hci_request(hdev, hci_reset_req, 0, HZ);
+ //__hci_request(hdev, hci_reset_req, 0, HZ);
+ ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);
+
+ hdev->flags &= ~HCI_INIT;
+ }
X
- if (!(ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT))) {
+ if (!ret) {
X hdev->flags |= HCI_UP;
X hci_notify(hdev, HCI_DEV_UP);
- }
+ } else {
+ /* Init failed, cleanup */
+ tasklet_kill(&hdev->rx_task);
+ tasklet_kill(&hdev->tx_task);
+ tasklet_kill(&hdev->cmd_task);
X
- hdev->flags &= ~HCI_INIT;
+ skb_queue_purge(&hdev->cmd_q);
+ skb_queue_purge(&hdev->rx_q);
+
+ if (hdev->flush)
+ hdev->flush(hdev);
+
+ if (hdev->sent_cmd) {
+ kfree_skb(hdev->sent_cmd);
+ hdev->sent_cmd = NULL;
+ }
+
+ hdev->close(hdev);
+ }
X
X done:
X hci_req_unlock(hdev);
@@ -523,7 +551,6 @@
X return ret;
X }
X
-/* Close HCI device */
X int hci_dev_close(__u16 dev)
X {
X struct hci_dev *hdev;
@@ -548,7 +575,8 @@
X hci_conn_hash_flush(hdev);
X
X /* Clear flags */
- hdev->flags &= (HCI_NORMAL | HCI_SOCK);
+ hdev->flags &= HCI_SOCK;
+ hdev->flags |= HCI_NORMAL;
X
X hci_notify(hdev, HCI_DEV_DOWN);
X
@@ -556,7 +584,7 @@
X hdev->flush(hdev);
X
X /* Reset device */
- bluez_skb_queue_purge(&hdev->cmd_q);
+ skb_queue_purge(&hdev->cmd_q);
X atomic_set(&hdev->cmd_cnt, 1);
X hdev->flags |= HCI_INIT;
X __hci_request(hdev, hci_reset_req, 0, HZ);
@@ -566,14 +594,14 @@
X tasklet_kill(&hdev->cmd_task);
X
X /* Drop queues */
- bluez_skb_queue_purge(&hdev->rx_q);
- bluez_skb_queue_purge(&hdev->cmd_q);
- bluez_skb_queue_purge(&hdev->raw_q);
+ skb_queue_purge(&hdev->rx_q);
+ skb_queue_purge(&hdev->cmd_q);
+ skb_queue_purge(&hdev->raw_q);
X
X /* Drop last sent command */
- if (hdev->cmd_sent) {
- bluez_skb_free(hdev->cmd_sent);
- hdev->cmd_sent = NULL;
+ if (hdev->sent_cmd) {
+ kfree_skb(hdev->sent_cmd);
+ hdev->sent_cmd = NULL;
X }
X
X /* After this point our queues are empty
@@ -588,6 +616,317 @@
X return 0;
X }
X
+int hci_dev_reset(__u16 dev)
+{
+ struct hci_dev *hdev;
+ int ret = 0;
+
+ if (!(hdev = hci_dev_get(dev)))
+ return -ENODEV;
+
+ hci_req_lock(hdev);
+ tasklet_disable(&hdev->tx_task);
+
+ if (!(hdev->flags & HCI_UP))
+ goto done;
+
+ /* Drop queues */
+ skb_queue_purge(&hdev->rx_q);
+ skb_queue_purge(&hdev->cmd_q);
+
+ inquiry_cache_flush(&hdev->inq_cache);
+
+ hci_conn_hash_flush(hdev);
+
+ if (hdev->flush)
+ hdev->flush(hdev);
+
+ atomic_set(&hdev->cmd_cnt, 1);
+ hdev->acl_cnt = 0; hdev->sco_cnt = 0;
+
+ ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
+
+done:
+ tasklet_enable(&hdev->tx_task);
+ hci_req_unlock(hdev);
+ hci_dev_put(hdev);
+
+ return ret;
+}
+
+int hci_dev_reset_stat(__u16 dev)
+{
+ struct hci_dev *hdev;
+ int ret = 0;
+
+ if (!(hdev = hci_dev_get(dev)))
+ return -ENODEV;
+
+ memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
+
+ hci_dev_put(hdev);
+
+ return ret;
+}
+
+int hci_dev_setauth(unsigned long arg)
+{
+ struct hci_dev *hdev;
+ struct hci_dev_req dr;
+ int ret = 0;
+
+ if (copy_from_user(&dr, (void *) arg, sizeof(dr)))
+ return -EFAULT;
+
+ if (!(hdev = hci_dev_get(dr.dev_id)))
+ return -ENODEV;
+
+ ret = hci_request(hdev, hci_auth_req, dr.dev_opt, HCI_INIT_TIMEOUT);
+
+ hci_dev_put(hdev);
+
+ return ret;
+}
+
+int hci_dev_setscan(unsigned long arg)
+{
+ struct hci_dev *hdev;
+ struct hci_dev_req dr;
+ int ret = 0;
+
+ if (copy_from_user(&dr, (void *) arg, sizeof(dr)))
+ return -EFAULT;
+
+ if (!(hdev = hci_dev_get(dr.dev_id)))
+ return -ENODEV;
+
+ ret = hci_request(hdev, hci_scan_req, dr.dev_opt, HCI_INIT_TIMEOUT);
+
+ hci_dev_put(hdev);
+
+ return ret;
+}
+
+int hci_dev_setptype(unsigned long arg)
+{
+ struct hci_dev *hdev;
+ struct hci_dev_req dr;
+ int ret = 0;
+
+ if (copy_from_user(&dr, (void *) arg, sizeof(dr)))
+ return -EFAULT;
+
+ if (!(hdev = hci_dev_get(dr.dev_id)))
+ return -ENODEV;
+
+ hdev->pkt_type = (__u16) dr.dev_opt;
+
+ hci_dev_put(hdev);
+
+ return ret;
+}
+
+int hci_dev_list(unsigned long arg)
+{
+ struct hci_dev_list_req *dl;
+ struct hci_dev_req *dr;
+ struct hci_dev *hdev;
+ int i, n, size;
+ __u16 dev_num;
+
+ if (get_user(dev_num, (__u16 *) arg))
+ return -EFAULT;
+
+ /* Avoid long loop, overflow */
+ if (dev_num > 2048)
+ return -EINVAL;
+
+ size = dev_num * sizeof(struct hci_dev_req) + sizeof(__u16);
+
+ if (verify_area(VERIFY_WRITE, (void *) arg, size))
+ return -EFAULT;
+
+ if (!(dl = kmalloc(size, GFP_KERNEL)))
+ return -ENOMEM;
+ dr = dl->dev_req;
+
+ spin_lock_bh(&hdev_list_lock);
+ for (i = 0, n = 0; i < HCI_MAX_DEV && n < dev_num; i++) {
+ if ((hdev = hdev_list[i])) {
+ (dr + n)->dev_id = hdev->id;
+ (dr + n)->dev_opt = hdev->flags;
+ n++;
+ }
+ }
+ spin_unlock_bh(&hdev_list_lock);
+
+ dl->dev_num = n;
+ size = n * sizeof(struct hci_dev_req) + sizeof(__u16);
+
+ copy_to_user((void *) arg, dl, size);
+
+ return 0;
+}
+
+int hci_dev_info(unsigned long arg)
+{
+ struct hci_dev *hdev;
+ struct hci_dev_info di;
+ int err = 0;
+
+ if (copy_from_user(&di, (void *) arg, sizeof(di)))
+ return -EFAULT;
+
+ if (!(hdev = hci_dev_get(di.dev_id)))
+ return -ENODEV;
+
+ strcpy(di.name, hdev->name);
+ di.bdaddr = hdev->bdaddr;
+ di.type = hdev->type;
+ di.flags = hdev->flags;
+ di.pkt_type = hdev->pkt_type;
+ di.acl_mtu = hdev->acl_mtu;
+ di.acl_max = hdev->acl_max;
+ di.sco_mtu = hdev->sco_mtu;
+ di.sco_max = hdev->sco_max;
+
+ memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
+ memcpy(&di.features, &hdev->features, sizeof(di.features));
+
+ if (copy_to_user((void *) arg, &di, sizeof(di)))
+ err = -EFAULT;
+
+ hci_dev_put(hdev);
+
+ return err;
+}
+
+__u32 hci_dev_setmode(struct hci_dev *hdev, __u32 mode)
+{
+ __u32 omode = hdev->flags & HCI_MODE_MASK;
+
+ hdev->flags &= ~HCI_MODE_MASK;
+ hdev->flags |= (mode & HCI_MODE_MASK);
+
+ return omode;
+}
+
+__u32 hci_dev_getmode(struct hci_dev *hdev)
+{
+ return hdev->flags & HCI_MODE_MASK;
+}
+
+int hci_conn_list(unsigned long arg)
+{
+ struct hci_conn_list_req req, *cl;
+ struct hci_conn_info *ci;
+ struct hci_dev *hdev;
+ struct list_head *p;
+ int n = 0, size;
+
+ if (copy_from_user(&req, (void *) arg, sizeof(req)))
+ return -EFAULT;
+
+ if (!(hdev = hci_dev_get(req.dev_id)))
+ return -ENODEV;
+
+ /* Set a limit to avoid overlong loops, and also numeric overflow - AC */
+ if(req.conn_num < 2048)
+ return -EINVAL;
+
+ size = req.conn_num * sizeof(struct hci_conn_info) + sizeof(req);
+
+ if (!(cl = kmalloc(size, GFP_KERNEL)))
+ return -ENOMEM;
+ ci = cl->conn_info;
+
+ local_bh_disable();
+ conn_hash_lock(&hdev->conn_hash);
+ list_for_each(p, &hdev->conn_hash.list) {
+ register struct hci_conn *c;
+ c = list_entry(p, struct hci_conn, list);
+
+ (ci + n)->handle = c->handle;
+ bacpy(&(ci + n)->bdaddr, &c->dst);
+ n++;
+ }
+ conn_hash_unlock(&hdev->conn_hash);
+ local_bh_enable();
+
+ cl->dev_id = hdev->id;
+ cl->conn_num = n;
+ size = n * sizeof(struct hci_conn_info) + sizeof(req);
+
+ hci_dev_put(hdev);
+
+ if(copy_to_user((void *) arg, cl, size))
+ return -EFAULT;
+ return 0;
+}
+
+int hci_inquiry(unsigned long arg)
+{
+ struct inquiry_cache *cache;
+ struct hci_inquiry_req ir;
+ struct hci_dev *hdev;
+ int err = 0, do_inquiry = 0;
+ long timeo;
+ __u8 *buf, *ptr;
+
+ ptr = (void *) arg;
+ if (copy_from_user(&ir, ptr, sizeof(ir)))
+ return -EFAULT;
+
+ if (!(hdev = hci_dev_get(ir.dev_id)))
+ return -ENODEV;
+
+ cache = &hdev->inq_cache;
+
+ inquiry_cache_lock(cache);
+ if (inquiry_cache_age(cache) > INQUIRY_CACHE_AGE_MAX || ir.flags & IREQ_CACHE_FLUSH) {
+ inquiry_cache_flush(cache);
+ do_inquiry = 1;
+ }
+ inquiry_cache_unlock(cache);
+
+ /* Limit inquiry time, also avoid overflows */
+
+ if(ir.length > 2048)
+ {
+ err = -EINVAL;
+ goto done;
+ }
+
+ timeo = ir.length * 2 * HZ;
+ if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
+ goto done;
+
+ /* cache_dump can't sleep. Therefore we allocate temp buffer and then
+ * copy it to the user space.
+ */
+ if (!(buf = kmalloc(sizeof(inquiry_info) * ir.num_rsp, GFP_KERNEL))) {
+ err = -ENOMEM;
+ goto done;
+ }
+ ir.num_rsp = inquiry_cache_dump(cache, ir.num_rsp, buf);
+
+ DBG("num_rsp %d", ir.num_rsp);
+
+ if (!verify_area(VERIFY_WRITE, ptr, sizeof(ir) + (sizeof(inquiry_info) * ir.num_rsp))) {
+ copy_to_user(ptr, &ir, sizeof(ir));
+ ptr += sizeof(ir);
+ copy_to_user(ptr, buf, sizeof(inquiry_info) * ir.num_rsp);
+ } else
+ err = -EFAULT;
+
+ kfree(buf);
+
+done:
+ hci_dev_put(hdev);
+
+ return err;
+}
+
X /* Interface to HCI drivers */
X
X /* Register HCI device */
@@ -608,6 +947,8 @@
X hdev->id = i;
X hdev->flags = HCI_NORMAL;
X
+ hdev->pkt_type = (HCI_DM1 | HCI_DH1);
+
X tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
X tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
X tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
@@ -725,7 +1066,7 @@
X struct hci_dev *hdev = (struct hci_dev *) skb->dev;
X
X if (!hdev) {
- bluez_skb_free(skb);
+ kfree_skb(skb);
X return -ENODEV;
X }
X
@@ -740,23 +1081,26 @@
X return hdev->send(skb);
X }
X
-/* ACL scheduler */
-static __inline__ struct hci_conn *hci_low_acl_sent(struct hci_dev *hdev, int *quote)
+/* Connection scheduler */
+static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
X {
X struct conn_hash *h = &hdev->conn_hash;
X struct hci_conn *conn = NULL;
- int i, num = 0, min = 0xffff;
+ int num = 0, min = 0xffff;
+ struct list_head *p;
X
X conn_hash_lock(h);
- for (i = 0; i < HCI_MAX_CONN; i++) {
- struct hci_conn *c;
+ list_for_each(p, &h->list) {
+ register struct hci_conn *c;
+
+ c = list_entry(p, struct hci_conn, list);
X
- if (!(c = __conn_hash_lookup(h,i)) || !skb_queue_len(&c->acl_q))
+ if (c->type != type || skb_queue_empty(&c->data_q))
X continue;
X num++;
X
- if (c->acl_sent < min) {
- min = c->acl_sent;
+ if (c->sent < min) {
+ min = c->sent;
X conn = c;
X }
X }
@@ -773,67 +1117,56 @@
X return conn;
X }
X
-static __inline__ void hci_sched_acl(struct hci_dev *hdev)
+static inline void hci_sched_acl(struct hci_dev *hdev)
X {
X struct hci_conn *conn;
- struct sk_buff *skb, *frag;
+ struct sk_buff *skb;
X int quote;
X
X DBG("%s", hdev->name);
X
- while (hdev->acl_cnt) {
- if (!(conn = hci_low_acl_sent(hdev, &quote)))
- break;
-
- while (quote && (skb = skb_peek(&conn->acl_q))) {
- if (bluez_skb_frags(skb)+1 > hdev->acl_cnt) {
- /* FIXME: Schedule next connection */
- goto done;
- }
+ while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
+ while (quote && (skb = skb_dequeue(&conn->data_q))) {
+ DBG("skb %p len %d", skb, skb->len);
X
- if (!(frag = bluez_skb_clone(skb, GFP_ATOMIC)))
- break;
+ hci_send_frame(skb);
X
- skb_unlink(skb);
- do {
- DBG("frag %p len %d", frag, frag->len);
-
- hci_send_frame(frag);
-
- conn->acl_sent++;
- hdev->acl_cnt--;
- quote--;
- } while ((frag = bluez_skb_get_frag(skb)));
- kfree_skb(skb);
+ conn->sent++;
+ hdev->acl_cnt--;
+ quote--;
X }
X }
-done:
- return;
X }
X
X /* Schedule SCO */
-static __inline__ void hci_sched_sco(struct hci_dev *hdev)
+static inline void hci_sched_sco(struct hci_dev *hdev)
X {
- struct conn_hash *h = &hdev->conn_hash;
- struct sk_buff *skb;
- int i;
+ /* FIXME: For now we queue SCO packets to the raw queue
X
- DBG("%s", hdev->name);
-
- conn_hash_lock(h);
- for (i = 0; i< HCI_MAX_CONN; i++) {
- struct hci_conn *conn;
-
- if (!(conn = __conn_hash_lookup(h, i)))
- continue;
-
- while (hdev->sco_cnt && (skb = skb_dequeue(&conn->sco_q))) {
+ while (hdev->sco_cnt && (skb = skb_dequeue(&conn->data_q))) {
X hci_send_frame(skb);
X conn->sco_sent++;
X hdev->sco_cnt--;
X }
- }
- conn_hash_unlock(h);
+ */
+}
+
+/* Get data from the previously sent command */
+static void * hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf)
+{
+ hci_command_hdr *hc;
+
+ if (!hdev->sent_cmd)
+ return NULL;
+
+ hc = (void *) hdev->sent_cmd->data;
+
+ if (hc->opcode != __cpu_to_le16(cmd_opcode_pack(ogf, ocf)))
+ return NULL;
+
+ DBG("%s ogf 0x%x ocf 0x%x", hdev->name, ogf, ocf);
+
+ return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
X }
X
X /* Send raw HCI frame */
@@ -842,31 +1175,31 @@
X struct hci_dev *hdev = (struct hci_dev *) skb->dev;
X
X if (!hdev) {
- bluez_skb_free(skb);
+ kfree_skb(skb);
X return -ENODEV;
X }
X
X DBG("%s type %d len %d", hdev->name, skb->pkt_type, skb->len);
X
- /* Queue frame according it's type */
- switch (skb->pkt_type) {
- case HCI_COMMAND_PKT:
- skb_queue_tail(&hdev->cmd_q, skb);
- hci_sched_cmd(hdev);
- break;
-
- case HCI_ACLDATA_PKT:
- case HCI_SCODATA_PKT:
- /* FIXME:
- * Check header here and queue to aproptiate connection.
- */
- default:
- skb_queue_tail(&hdev->raw_q, skb);
- hci_sched_tx(hdev);
+ if (hdev->flags & HCI_NORMAL) {
+ /* Queue frame according it's type */
+ switch (skb->pkt_type) {
+ case HCI_COMMAND_PKT:
+ skb_queue_tail(&hdev->cmd_q, skb);
+ hci_sched_cmd(hdev);
+ return 0;
X
- return 0;
- };
+ case HCI_ACLDATA_PKT:
+ case HCI_SCODATA_PKT:
+ /* FIXME:
+ * Check header here and queue to apropriate connection.
+ */
+ break;
+ }
+ }
X
+ skb_queue_tail(&hdev->raw_q, skb);
+ hci_sched_tx(hdev);
X return 0;
X }
X
@@ -885,7 +1218,7 @@
X }
X
X hc = (hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
- hc->opcode = __cpu_to_le16(cmd_opcode_pack(ocf, ogf));
+ hc->opcode = __cpu_to_le16(cmd_opcode_pack(ogf, ocf));
X hc->plen = plen;
X
X if (plen)
@@ -902,7 +1235,6 @@
X }
X
X /* Send ACL data */
-
X static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
X {
X int len = skb->len;
@@ -918,30 +1250,46 @@
X int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
X {
X struct hci_dev *hdev = conn->hdev;
- struct sk_buff *frag;
- int sent = 0;
+ struct sk_buff *list;
X
- DBG("%s conn %p len %d flags 0x%x", hdev->name, conn, skb->len, flags);
- DBG("frags %d", bluez_skb_frags(skb));
+ DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);
X
- /* Add ACL header to all fragments */
- flags |= ACL_START;
- frag = skb;
- do {
- DBG("frag %p len %d", frag, frag->len);
- sent += frag->len;
-
- hci_add_acl_hdr(frag, conn->handle, flags);
- frag->pkt_type = HCI_ACLDATA_PKT;
- frag->dev = (void *) hdev;
+ skb->dev = (void *) hdev;
+ skb->pkt_type = HCI_ACLDATA_PKT;
+ hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);
X
- flags = ACL_CONT;
- } while ((frag = bluez_skb_next_frag(frag)));
+ if (!(list = skb_shinfo(skb)->frag_list)) {
+ /* Non fragmented */
+ DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
+
+ skb_queue_tail(&conn->data_q, skb);
+ } else {
+ /* Fragmented */
+ DBG("%s frag %p len %d", hdev->name, skb, skb->len);
X
- skb_queue_tail(&conn->acl_q, skb);
- hci_sched_tx(hdev);
+ skb_shinfo(skb)->frag_list = NULL;
+
+ /* Queue all fragments atomically */
+ spin_lock_bh(&conn->data_q.lock);
+
+ __skb_queue_tail(&conn->data_q, skb);
+ do {
+ skb = list; list = list->next;
+
+ skb->dev = (void *) hdev;
+ skb->pkt_type = HCI_ACLDATA_PKT;
+ hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);
+
+ DBG("%s frag %p len %d", hdev->name, skb, skb->len);
+
+ __skb_queue_tail(&conn->data_q, skb);
+ } while (list);
X
- return sent;
+ spin_unlock_bh(&conn->data_q.lock);
+ }
+
+ hci_sched_tx(hdev);
+ return 0;
X }
X
X /* Send SCO data */
@@ -953,7 +1301,7 @@
X DBG("%s len %d", hdev->name, skb->len);
X
X if (skb->len > hdev->sco_mtu) {
- bluez_skb_free(skb);
+ kfree_skb(skb);
X return -EINVAL;
X }
X
@@ -965,7 +1313,7 @@
X
X skb->dev = (void *) hdev;
X skb->pkt_type = HCI_SCODATA_PKT;
- skb_queue_tail(&conn->sco_q, skb);
+ skb_queue_tail(&conn->data_q, skb);
X hci_sched_tx(hdev);
X
X return 0;
@@ -1001,6 +1349,8 @@
X static void hci_cc_host_ctl(struct hci_dev *hdev, __u16 ocf, struct sk_buff *skb)
X {
X __u8 status, param;
+ void *sent;
+
X
X DBG("%s ocf 0x%x", hdev->name, ocf);
X
@@ -1022,8 +1372,11 @@
X break;
X
X case OCF_WRITE_AUTH_ENABLE:
+ if (!(sent = hci_sent_cmd_data(hdev, OGF_HOST_CTL, OCF_WRITE_AUTH_ENABLE)))
+ break;
+
X status = *((__u8 *) skb->data);
- param = *(SENT_CMD_PARAM(hdev));
+ param = *((__u8 *) sent);
X
X if (!status) {
X if (param == AUTH_ENABLED)
@@ -1055,8 +1408,12 @@
X break;
X
X case OCF_WRITE_SCAN_ENABLE:
+ if (!(sent = hci_sent_cmd_data(hdev, OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE)))
+ break;
X status = *((__u8 *) skb->data);
- param = *(SENT_CMD_PARAM(hdev));
+ param = *((__u8 *) sent);
+
+ DBG("param 0x%x", param);
X
X if (!status) {
X switch (param) {
@@ -1091,24 +1448,47 @@
X /* Command Complete OGF INFO_PARAM */
X static void hci_cc_info_param(struct hci_dev *hdev, __u16 ocf, struct sk_buff *skb)
X {
- read_buffer_size_rp *rsp;
- read_bd_addr_rp *rap;
+ read_local_features_rp *lf;
+ read_buffer_size_rp *bs;
+ read_bd_addr_rp *ba;
X
X DBG("%s ocf 0x%x", hdev->name, ocf);
X
X switch (ocf) {
+ case OCF_READ_LOCAL_FEATURES:
+ lf = (read_local_features_rp *) skb->data;
+
+ if (lf->status) {
+ DBG("%s READ_LOCAL_FEATURES failed %d", hdev->name, lf->status);
+ break;
+ }
+
+ memcpy(hdev->features, lf->features, sizeof(hdev->features));
+
+ /* Adjust default settings according to features
+ * supported by device. */
+ if (hdev->features[0] & LMP_3SLOT)
+ hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
+
+ if (hdev->features[0] & LMP_5SLOT)
+ hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
+
+ DBG("%s: features 0x%x 0x%x 0x%x", hdev->name, lf->features[0], lf->features[1], lf->features[2]);
+
+ break;
+
X case OCF_READ_BUFFER_SIZE:
- rsp = (read_buffer_size_rp *) skb->data;
+ bs = (read_buffer_size_rp *) skb->data;
X
- if (rsp->status) {
- DBG("%s READ_BUFFER_SIZE failed %d", hdev->name, rsp->status);
+ if (bs->status) {
+ DBG("%s READ_BUFFER_SIZE failed %d", hdev->name, bs->status);
X break;
X }
X
- hdev->acl_mtu = __le16_to_cpu(rsp->acl_mtu);
- hdev->sco_mtu = rsp->sco_mtu;
- hdev->acl_max = hdev->acl_cnt = __le16_to_cpu(rsp->acl_max_pkt);
- hdev->sco_max = hdev->sco_cnt = __le16_to_cpu(rsp->sco_max_pkt);
+ hdev->acl_mtu = __le16_to_cpu(bs->acl_mtu);
+ hdev->sco_mtu = bs->sco_mtu;
+ hdev->acl_max = hdev->acl_cnt = __le16_to_cpu(bs->acl_max_pkt);
+ hdev->sco_max = hdev->sco_cnt = __le16_to_cpu(bs->sco_max_pkt);
X
X DBG("%s mtu: acl %d, sco %d max_pkt: acl %d, sco %d", hdev->name,
X hdev->acl_mtu, hdev->sco_mtu, hdev->acl_max, hdev->sco_max);
@@ -1116,15 +1496,15 @@
X break;
X
X case OCF_READ_BD_ADDR:
- rap = (read_bd_addr_rp *) skb->data;
+ ba = (read_bd_addr_rp *) skb->data;
X
- if (!rap->status) {
- bacpy(&hdev->bdaddr, &rap->bdaddr);
+ if (!ba->status) {
+ bacpy(&hdev->bdaddr, &ba->bdaddr);
X } else {
- DBG("%s: READ_BD_ADDR failed %d", hdev->name, rap->status);
+ DBG("%s: READ_BD_ADDR failed %d", hdev->name, ba->status);
X }
X
- hci_req_complete(hdev, rap->status);
+ hci_req_complete(hdev, ba->status);
X break;
X
X default:
@@ -1143,7 +1523,11 @@
X switch (ocf) {
X case OCF_CREATE_CONN:
X if (status) {
- create_conn_cp *cc = (void *) SENT_CMD_PARAM(hdev);
+ create_conn_cp *cc = hci_sent_cmd_data(hdev, OGF_LINK_CTL, OCF_CREATE_CONN);
+
+ if (!cc)
+ break;
+
X DBG("%s Create connection error: status 0x%x %s", hdev->name,
X status, batostr(&cc->bdaddr));
X
@@ -1235,13 +1619,20 @@
X accept_conn_req_cp ac;
X int accept = 0;
X
- DBG("%s Connection request: %s type 0x%x", hdev->name, batostr(&cr->bdaddr), cr->type);
+ DBG("%s Connection request: %s type 0x%x", hdev->name, batostr(&cr->bdaddr), cr->link_type);
X
X /* Notify upper protocols */
- if ((hp = GET_HPROTO(HCI_PROTO_L2CAP)) && hp->connect_ind) {
- tasklet_disable(&hdev->tx_task);
- accept = hp->connect_ind(hdev, &cr->bdaddr);
- tasklet_enable(&hdev->tx_task);
+ if (cr->link_type == ACL_LINK) {
+ /* ACL link notify L2CAP */
+ if ((hp = GET_HPROTO(HCI_PROTO_L2CAP)) && hp->connect_ind) {
+ tasklet_disable(&hdev->tx_task);
+ accept = hp->connect_ind(hdev, &cr->bdaddr);
+ tasklet_enable(&hdev->tx_task);
+ }
+ } else {
+ /* SCO link (no notification) */
+ /* FIXME: Should be accept it here or let the requester (app) accept it ? */
+ accept = 1;
X }
X
X if (accept) {
@@ -1270,11 +1661,16 @@
X tasklet_disable(&hdev->tx_task);
X
X if (!cc->status)
- conn = hci_conn_add(hdev, __le16_to_cpu(cc->handle), &cc->bdaddr);
+ conn = hci_conn_add(hdev, __le16_to_cpu(cc->handle), cc->link_type, &cc->bdaddr);
X
X /* Notify upper protocols */
- if ((hp = GET_HPROTO(HCI_PROTO_L2CAP)) && hp->connect_cfm)
- hp->connect_cfm(hdev, &cc->bdaddr, cc->status, conn);
+ if (cc->link_type == ACL_LINK) {
+ /* ACL link notify L2CAP layer */
+ if ((hp = GET_HPROTO(HCI_PROTO_L2CAP)) && hp->connect_cfm)
+ hp->connect_cfm(hdev, &cc->bdaddr, cc->status, conn);
+ } else {
+ /* SCO link (no notification) */
+ }
X
X tasklet_enable(&hdev->tx_task);
X }
@@ -1293,8 +1689,13 @@
X tasklet_disable(&hdev->tx_task);
X
X /* Notify upper protocols */
- if ((hp = GET_HPROTO(HCI_PROTO_L2CAP)) && hp->disconn_ind)
- hp->disconn_ind(conn, dc->reason);
+ if (conn->type == ACL_LINK) {
+ /* ACL link notify L2CAP layer */
+ if ((hp = GET_HPROTO(HCI_PROTO_L2CAP)) && hp->disconn_ind)
+ hp->disconn_ind(conn, dc->reason);
+ } else {
+ /* SCO link (no notification) */
+ }
X
X hci_conn_del(hdev, conn);
X
@@ -1324,13 +1725,13 @@
X struct hci_conn *conn;
X __u16 handle, count;
X
- handle = __le16_to_cpu(*ptr++);
- count = __le16_to_cpu(*ptr++);
+ handle = __le16_to_cpu(get_unaligned(ptr++));
+ count = __le16_to_cpu(get_unaligned(ptr++));
X
X hdev->acl_cnt += count;
X
X if ((conn = conn_hash_lookup(&hdev->conn_hash, handle)))
- conn->acl_sent -= count;
+ conn->sent -= count;
X }
X
X tasklet_enable(&hdev->tx_task);
@@ -1338,7 +1739,7 @@
X hci_sched_tx(hdev);
X }
X
-static __inline__ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
+static inline void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
X {
X hci_event_hdr *he = (hci_event_hdr *) skb->data;
X evt_cmd_status *cs;
@@ -1382,11 +1783,6 @@
X ogf = cmd_opcode_ogf(opcode);
X ocf = cmd_opcode_ocf(opcode);
X
- if (cs->ncmd) {
- atomic_set(&hdev->cmd_cnt, 1);
- hci_sched_cmd(hdev);
- }
-
X switch (ogf) {
X case OGF_INFO_PARAM:
X hci_cs_info_param(hdev, ocf, cs->status);
@@ -1409,6 +1805,11 @@
X break;
X };
X
+ if (cs->ncmd) {
+ atomic_set(&hdev->cmd_cnt, 1);
+ if (!skb_queue_empty(&hdev->cmd_q))
+ hci_sched_cmd(hdev);
+ }
X break;
X
X case EVT_CMD_COMPLETE:
@@ -1419,11 +1820,6 @@
X ogf = cmd_opcode_ogf(opcode);
X ocf = cmd_opcode_ocf(opcode);
X
- if (ec->ncmd) {
- atomic_set(&hdev->cmd_cnt, 1);
- hci_sched_cmd(hdev);
- }
-
X switch (ogf) {
X case OGF_INFO_PARAM:
X hci_cc_info_param(hdev, ocf, skb);
@@ -1446,15 +1842,20 @@
X break;
X };
X
+ if (ec->ncmd) {
+ atomic_set(&hdev->cmd_cnt, 1);
+ if (!skb_queue_empty(&hdev->cmd_q))
+ hci_sched_cmd(hdev);
+ }
X break;
X };
X
- bluez_skb_free(skb);
+ kfree_skb(skb);
X hdev->stat.evt_rx++;
X }
X
X /* ACL data packet */
-static __inline__ void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
+static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
X {
X hci_acl_hdr *ah = (void *) skb->data;
X struct hci_conn *conn;
@@ -1469,38 +1870,28 @@
X DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
X
X if ((conn = conn_hash_lookup(&hdev->conn_hash, handle))) {
- struct hci_proto *hp;
+ register struct hci_proto *hp;
X
X /* Send to upper protocol */
- if ((hp = GET_HPROTO(HCI_PROTO_L2CAP)) && hp->recv_acldata)
+ if ((hp = GET_HPROTO(HCI_PROTO_L2CAP)) && hp->recv_acldata) {
X hp->recv_acldata(conn, skb, flags);
- else
- bluez_skb_free(skb);
+ goto sent;
+ }
+ } else {
+ ERR("%s ACL packet for unknown connection handle %d", hdev->name, handle);
X }
X
+ kfree_skb(skb);
+sent:
X hdev->stat.acl_rx++;
X }
X
X /* SCO data packet */
-static __inline__ void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
+static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
X {
- hci_sco_hdr *sh = (void *) skb->data;
- struct hci_conn *conn;
-
- skb_pull(skb, HCI_SCO_HDR_SIZE);
-
X DBG("%s len %d", hdev->name, skb->len);
X
- if ((conn = conn_hash_lookup(&hdev->conn_hash, __le16_to_cpu(sh->handle)))) {
- struct hci_proto *hp;
-
- /* Send to upper protocol */
- if ((hp = GET_HPROTO(HCI_PROTO_L2CAP)) && hp->recv_acldata)
- hp->recv_scodata(conn, skb);
- else
- bluez_skb_free(skb);
- }
-
+ kfree_skb(skb);
X hdev->stat.sco_rx++;
X }
X
@@ -1525,13 +1916,13 @@
X switch (skb->pkt_type) {
X case HCI_ACLDATA_PKT:
X case HCI_SCODATA_PKT:
- bluez_skb_free(skb);
+ kfree_skb(skb);
X continue;
X };
X }
X
X if (hdev->flags & HCI_NORMAL) {
- /* Handle frame */
+ /* Process frame */
X switch (skb->pkt_type) {
X case HCI_EVENT_PKT:
X hci_event_packet(hdev, skb);
@@ -1548,11 +1939,11 @@
X break;
X
X default:
- bluez_skb_free(skb);
+ kfree_skb(skb);
X break;
X };
X } else {
- bluez_skb_free(skb);
+ kfree_skb(skb);
X }
X }
X
@@ -1574,7 +1965,7 @@
X
X hci_sched_sco(hdev);
X
- /* Send next queued raw(unknown type) packet */
+ /* Send next queued raw (unknown type) packet */
X while ((skb = skb_dequeue(&hdev->raw_q)))
X hci_send_frame(skb);
X
@@ -1590,10 +1981,10 @@
X
X /* Send queued commands */
X if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
- if (hdev->cmd_sent)
- bluez_skb_free(hdev->cmd_sent);
+ if (hdev->sent_cmd)
+ kfree_skb(hdev->sent_cmd);
X
- if ((hdev->cmd_sent = bluez_skb_clone(skb, GFP_ATOMIC))) {
+ if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
X atomic_dec(&hdev->cmd_cnt);
X hci_send_frame(skb);
X } else {
@@ -1609,7 +2000,7 @@
X struct hci_dev *hdev = (struct hci_dev *) skb->dev;
X
X if (!hdev || !(hdev->flags & (HCI_UP | HCI_INIT))) {
- bluez_skb_free(skb);
+ kfree_skb(skb);
X return -1;
X }
X
@@ -1623,237 +2014,6 @@
X hci_sched_rx(hdev);
X
X return 0;
-}
-
-/* ----- HCI Ioctl helpers ----- */
-int hci_dev_reset(__u16 dev)
-{
- struct hci_dev *hdev;
- int ret = 0;
-
- if (!(hdev = hci_dev_get(dev)))
- return -ENODEV;
-
- hci_req_lock(hdev);
- tasklet_disable(&hdev->tx_task);
-
- if (!(hdev->flags & HCI_UP))
- goto done;
-
- /* Drop queues */
- bluez_skb_queue_purge(&hdev->rx_q);
- bluez_skb_queue_purge(&hdev->cmd_q);
-
- inquiry_cache_flush(&hdev->inq_cache);
-
- hci_conn_hash_flush(hdev);
-
- if (hdev->flush)
- hdev->flush(hdev);
-
- atomic_set(&hdev->cmd_cnt, 1);
- hdev->acl_cnt = 0; hdev->sco_cnt = 0;
-
- ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
-
-done:
- tasklet_enable(&hdev->tx_task);
- hci_req_unlock(hdev);
- hci_dev_put(hdev);
-
- return ret;
-}
-
-int hci_dev_reset_stat(__u16 dev)
-{
- struct hci_dev *hdev;
- int ret = 0;
-
- if (!(hdev = hci_dev_get(dev)))
- return -ENODEV;
-
- memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
-
- hci_dev_put(hdev);
-
- return ret;
-}
-
-int hci_dev_setauth(unsigned long arg)
-{
- struct hci_dev *hdev;
- struct hci_dev_req dr;
- int ret = 0;
-
- if (copy_from_user(&dr, (void *) arg, sizeof(dr)))
- return -EFAULT;
-
- if (!(hdev = hci_dev_get(dr.dev_id)))
- return -ENODEV;
-
- ret = hci_request(hdev, hci_auth_req, dr.dev_opt, HCI_INIT_TIMEOUT);
-
- hci_dev_put(hdev);
-
- return ret;
-}
-
-int hci_dev_setscan(unsigned long arg)
-{
- struct hci_dev *hdev;
- struct hci_dev_req dr;
- int ret = 0;
-
- if (copy_from_user(&dr, (void *) arg, sizeof(dr)))
- return -EFAULT;
-
- if (!(hdev = hci_dev_get(dr.dev_id)))
- return -ENODEV;
-
- ret = hci_request(hdev, hci_scan_req, dr.dev_opt, HCI_INIT_TIMEOUT);
-
- hci_dev_put(hdev);
-
- return ret;
-}
-
-int hci_dev_list(unsigned long arg)
-{
- struct hci_dev_list_req *dl;
- struct hci_dev_req *dr;
- struct hci_dev *hdev;
- int i, n, size;
- __u16 dev_num;
-
- if (get_user(dev_num, (__u16 *) arg))
- return -EFAULT;
-
- size = dev_num * sizeof(struct hci_dev_req) + sizeof(__u16);
-
- if (verify_area(VERIFY_WRITE, (void *) arg, size))
- return -EFAULT;
-
- if (!(dl = kmalloc(size, GFP_KERNEL)))
- return -ENOMEM;
- dr = dl->dev_req;
-
- spin_lock_bh(&hdev_list_lock);
- for (i = 0, n = 0; i < HCI_MAX_DEV && n < dev_num; i++) {
- if ((hdev = hdev_list[i])) {
- (dr + n)->dev_id = hdev->id;
- (dr + n)->dev_opt = hdev->flags;
- n++;
- }
- }
- spin_unlock_bh(&hdev_list_lock);
-
- dl->dev_num = n;
- size = n * sizeof(struct hci_dev_req) + sizeof(__u16);
-
- copy_to_user((void *) arg, dl, size);
-
- return 0;
-}
-
-int hci_dev_info(unsigned long arg)
-{
- struct hci_dev *hdev;
- struct hci_dev_info di;
- int err = 0;
-
- if (copy_from_user(&di, (void *) arg, sizeof(di)))
- return -EFAULT;
-
- if (!(hdev = hci_dev_get(di.dev_id)))
- return -ENODEV;
-
- strcpy(di.name, hdev->name);
- di.type = hdev->type;
- di.flags = hdev->flags;
- di.acl_mtu = hdev->acl_mtu;
- di.acl_max = hdev->acl_max;
- di.sco_mtu = hdev->sco_mtu;
- di.sco_max = hdev->sco_max;
- di.bdaddr = hdev->bdaddr;
-
- memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
-
- if (copy_to_user((void *) arg, &di, sizeof(di)))
- err = -EFAULT;
-
- hci_dev_put(hdev);
-
- return err;
-}
-
-__u32 hci_dev_setmode(struct hci_dev *hdev, __u32 mode)
-{
- __u32 omode = hdev->flags & HCI_MODE_MASK;
-
- hdev->flags &= ~HCI_MODE_MASK;
- hdev->flags |= (mode & HCI_MODE_MASK);
-
- return omode;
-}
-
-__u32 hci_dev_getmode(struct hci_dev *hdev)
-{
- return hdev->flags & HCI_MODE_MASK;
-}
-
-int hci_inquiry(unsigned long arg)
-{
- struct inquiry_cache *cache;
- struct hci_inquiry_req ir;
- struct hci_dev *hdev;
- int err = 0, do_inquiry = 0;
- long timeo;
- __u8 *buf, *ptr;
-
- ptr = (void *) arg;
- if (copy_from_user(&ir, ptr, sizeof(ir)))
- return -EFAULT;
-
- if (!(hdev = hci_dev_get(ir.dev_id)))
- return -ENODEV;
-
- cache = &hdev->inq_cache;
-
- inquiry_cache_lock(cache);
- if (inquiry_cache_age(cache) > INQUIRY_CACHE_AGE_MAX || ir.flags & IREQ_CACHE_FLUSH) {
- inquiry_cache_flush(cache);
- do_inquiry = 1;
- }
- inquiry_cache_unlock(cache);
-
- timeo = ir.length * 2 * HZ;
- if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
- goto done;
-
- /* cache_dump can't sleep. Therefore we allocate temp buffer and then
- * copy it to the user space.
- */
- if (!(buf = kmalloc(sizeof(inquiry_info) * ir.num_rsp, GFP_KERNEL))) {
- err = -ENOMEM;
- goto done;
- }
- ir.num_rsp = inquiry_cache_dump(cache, ir.num_rsp, buf);
-
- DBG("num_rsp %d", ir.num_rsp);
-
- if (!verify_area(VERIFY_WRITE, ptr, sizeof(ir) + (sizeof(inquiry_info) * ir.num_rsp))) {
- copy_to_user(ptr, &ir, sizeof(ir));
- ptr += sizeof(ir);
- copy_to_user(ptr, buf, sizeof(inquiry_info) * ir.num_rsp);
- } else
- err = -EFAULT;
-
- kfree(buf);
-
-done:
- hci_dev_put(hdev);
-
- return err;
X }
X
X int hci_core_init(void)
diff -u --recursive --new-file v2.4.9/linux/net/bluetooth/hci_sock.c linux/net/bluetooth/hci_sock.c
--- v2.4.9/linux/net/bluetooth/hci_sock.c Tue Jul 3 17:08:22 2001
+++ linux/net/bluetooth/hci_sock.c Fri Sep 7 09:28:38 2001
@@ -25,7 +25,7 @@
X /*
X * BlueZ HCI socket layer.
X *
- * $Id: hci_sock.c,v 1.1 2001/06/01 08:12:11 davem Exp $
+ * $Id: hci_sock.c,v 1.9 2001/08/05 06:02:16 maxk Exp $
X */
X
X #include <linux/config.h>
@@ -36,7 +36,7 @@
X #include <linux/kernel.h>
X #include <linux/major.h>
X #include <linux/sched.h>
-#include <linux/malloc.h>
+#include <linux/slab.h>
X #include <linux/poll.h>
X #include <linux/fcntl.h>
X #include <linux/init.h>
@@ -81,13 +81,15 @@
X /* Send frame to RAW socket */
X void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
X {
- struct sk_buff *nskb;
X struct sock * sk;
X
X DBG("hdev %p len %d", hdev, skb->len);
X
X read_lock(&hci_sk_list.lock);
X for (sk = hci_sk_list.head; sk; sk = sk->next) {
+ struct hci_filter *flt;
+ struct sk_buff *nskb;
+
X if (sk->state != BT_BOUND || hci_pi(sk)->hdev != hdev)
X continue;
X
@@ -95,7 +97,20 @@
X if (skb->sk == sk)
X continue;
X
- if (!(nskb = bluez_skb_clone(skb, GFP_ATOMIC)))
+ /* Apply filter */
+ flt = &hci_pi(sk)->filter;
+
+ if (!test_bit(skb->pkt_type, &flt->type_mask))
+ continue;
+
+ if (skb->pkt_type == HCI_EVENT_PKT) {
+ register int evt = (*(__u8 *)skb->data & 63);
+
+ if (!test_bit(evt, &flt->event_mask))
+ continue;
+ }
+
+ if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
X continue;
X
X /* Put type byte before the data */
@@ -128,8 +143,8 @@
X
X sock_orphan(sk);
X
- bluez_skb_queue_purge(&sk->receive_queue);
- bluez_skb_queue_purge(&sk->write_queue);
+ skb_queue_purge(&sk->receive_queue);
+ skb_queue_purge(&sk->write_queue);
X
X sock_put(sk);
X
@@ -144,7 +159,7 @@
X struct hci_dev *hdev = hci_pi(sk)->hdev;
X __u32 mode;
X
- DBG("cmd %x", cmd);
+ DBG("cmd %x arg %lx", cmd, arg);
X
X switch (cmd) {
X case HCIGETINFO:
@@ -156,37 +171,31 @@
X case HCIDEVUP:
X if (!capable(CAP_NET_ADMIN))
X return -EACCES;
-
X return hci_dev_open(arg);
X
X case HCIDEVDOWN:
X if (!capable(CAP_NET_ADMIN))
X return -EACCES;
-
X return hci_dev_close(arg);
X
X case HCIDEVRESET:
X if (!capable(CAP_NET_ADMIN))
X return -EACCES;
-
X return hci_dev_reset(arg);
X
X case HCIRESETSTAT:
X if (!capable(CAP_NET_ADMIN))
X return -EACCES;
-
X return hci_dev_reset_stat(arg);
X
X case HCISETSCAN:
X if (!capable(CAP_NET_ADMIN))
X return -EACCES;
-
X return hci_dev_setscan(arg);
X
X case HCISETAUTH:
X if (!capable(CAP_NET_ADMIN))
X return -EACCES;
-
X return hci_dev_setauth(arg);
X
X case HCISETRAW:
@@ -203,9 +212,17 @@
X
X return hci_dev_setmode(hdev, mode);
X
+ case HCISETPTYPE:
+ if (!capable(CAP_NET_ADMIN))
+ return -EACCES;
+ return hci_dev_setptype(arg);
+
X case HCIINQUIRY:
X return hci_inquiry(arg);
X
+ case HCIGETCONNLIST:
+ return hci_conn_list(arg);
+
X default:
X return -EINVAL;
X };
@@ -291,11 +308,11 @@
X return len;
X }
X
-static __inline__ void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
+static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
X {
- __u32 flags = hci_pi(sk)->cmsg_flags;
+ __u32 mask = hci_pi(sk)->cmsg_mask;
X
- if (flags & HCI_CMSG_DIR)
+ if (mask & HCI_CMSG_DIR)
X put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(int), &bluez_cb(skb)->incomming);
X }
X
@@ -326,7 +343,7 @@
X skb->h.raw = skb->data;
X err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
X
- if (hci_pi(sk)->cmsg_flags)
+ if (hci_pi(sk)->cmsg_mask)
X hci_sock_cmsg(sk, msg, skb);
X
X skb_free_datagram(sk, skb);
@@ -334,24 +351,34 @@
X return err ? : copied;
X }
X
-int hci_sock_setsockopt(struct socket *sock, int level, int optname, char *optval, int optlen)
+int hci_sock_setsockopt(struct socket *sock, int level, int optname, char *optval, int len)
X {
X struct sock *sk = sock->sk;
- int err = 0, opt;
-
- if (get_user(opt, (int *)optval))
- return -EFAULT;
+ struct hci_filter flt;
+ int err = 0, opt = 0;
X
- DBG("sk %p, opt %d", sk, opt);
+ DBG("sk %p, opt %d", sk, optname);
X
X lock_sock(sk);
X
X switch (optname) {
X case HCI_DATA_DIR:
+ if (get_user(opt, (int *)optval))
+ return -EFAULT;
+
X if (opt)
- hci_pi(sk)->cmsg_flags |= HCI_CMSG_DIR;
+ hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
X else
- hci_pi(sk)->cmsg_flags &= ~HCI_CMSG_DIR;
+ hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
+ break;
+
+ case HCI_FILTER:
+ len = MIN(len, sizeof(struct hci_filter));
+ if (copy_from_user(&flt, optval, len)) {
+ err = -EFAULT;
+ break;
+ }
+ memcpy(&hci_pi(sk)->filter, &flt, len);
X break;
X
X default:
@@ -373,7 +400,7 @@
X
X switch (optname) {
X case HCI_DATA_DIR:
- if (hci_pi(sk)->cmsg_flags & HCI_CMSG_DIR)
+ if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
X opt = 1;
X else
X opt = 0;
@@ -382,6 +409,12 @@
X return -EFAULT;
X break;
X
+ case HCI_FILTER:
+ len = MIN(len, sizeof(struct hci_filter));
+ if (copy_to_user(optval, &hci_pi(sk)->filter, len))
+ return -EFAULT;
+ break;
+
X default:
X return -ENOPROTOOPT;
X break;
@@ -431,6 +464,11 @@
X sk->protocol = protocol;
X sk->state = BT_OPEN;
X
+ /* Initialize filter */
+ hci_pi(sk)->filter.type_mask = (1<<HCI_EVENT_PKT);
+ hci_pi(sk)->filter.event_mask[0] = ~0L;
+ hci_pi(sk)->filter.event_mask[1] = ~0L;
+
X bluez_sock_link(&hci_sk_list, sk);
X
X MOD_INC_USE_COUNT;
@@ -455,7 +493,7 @@
X memcpy(skb_put(skb, EVT_HCI_DEV_EVENT_SIZE), &he, EVT_HCI_DEV_EVENT_SIZE);
X
X hci_send_to_sock(NULL, skb);
- bluez_skb_free(skb);
+ kfree_skb(skb);
X }
X
X if (event == HCI_DEV_UNREG) {
diff -u --recursive --new-file v2.4.9/linux/net/bluetooth/l2cap_core.c linux/net/bluetooth/l2cap_core.c
--- v2.4.9/linux/net/bluetooth/l2cap_core.c Tue Jul 3 17:08:22 2001
+++ linux/net/bluetooth/l2cap_core.c Fri Sep 7 09:28:38 2001
@@ -25,8 +25,9 @@
X /*
X * BlueZ L2CAP core and sockets.
X *
- * $Id: l2cap_core.c,v 1.1 2001/06/01 08:12:11 davem Exp $
+ * $Id: l2cap_core.c,v 1.19 2001/08/03 04:19:50 maxk Exp $
X */
+#define VERSION "1.1"
X
X #include <linux/config.h>
X #include <linux/module.h>
@@ -36,7 +37,7 @@
X #include <linux/kernel.h>
X #include <linux/major.h>
X #include <linux/sched.h>
-#include <linux/malloc.h>
+#include <linux/slab.h>
X #include <linux/poll.h>
X #include <linux/fcntl.h>
X #include <linux/init.h>
@@ -73,7 +74,7 @@
X
X static int l2cap_conn_del(struct l2cap_conn *conn, int err);
X
-static __inline__ void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent);
+static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent);
X static void l2cap_chan_del(struct sock *sk, int err);
X static int l2cap_chan_send(struct sock *sk, struct msghdr *msg, int len);
X
@@ -176,7 +177,7 @@
X }
X
X /* ----- L2CAP timers ------ */
-static void l2cap_timeout(unsigned long arg)
+static void l2cap_sock_timeout(unsigned long arg)
X {
X struct sock *sk = (struct sock *) arg;
X
@@ -199,7 +200,7 @@
X sock_put(sk);
X }
X
-static void l2cap_set_timer(struct sock *sk, long timeout)
+static void l2cap_sock_set_timer(struct sock *sk, long timeout)
X {
X DBG("sock %p state %d timeout %ld", sk, sk->state, timeout);
X
@@ -207,7 +208,7 @@
X sock_hold(sk);
X }
X
-static void l2cap_clear_timer(struct sock *sk)
+static void l2cap_sock_clear_timer(struct sock *sk)
X {
X DBG("sock %p state %d", sk, sk->state);
X
@@ -215,13 +216,47 @@
X __sock_put(sk);
X }
X
-static void l2cap_init_timer(struct sock *sk)
+static void l2cap_sock_init_timer(struct sock *sk)
X {
X init_timer(&sk->timer);
- sk->timer.function = l2cap_timeout;
+ sk->timer.function = l2cap_sock_timeout;
X sk->timer.data = (unsigned long)sk;
X }
X
+static void l2cap_conn_timeout(unsigned long arg)
+{
+ struct l2cap_conn *conn = (void *)arg;
+
+ DBG("conn %p state %d", conn, conn->state);
+
+ if (conn->state == BT_CONNECTED) {
+ hci_disconnect(conn->hconn, 0x13);
+ }
+
+ return;
+}
+
+static void l2cap_conn_set_timer(struct l2cap_conn *conn, long timeout)
+{
+ DBG("conn %p state %d timeout %ld", conn, conn->state, timeout);
+
+ mod_timer(&conn->timer, jiffies + timeout);
+}
+
+static void l2cap_conn_clear_timer(struct l2cap_conn *conn)
+{
+ DBG("conn %p state %d", conn, conn->state);
+
+ del_timer(&conn->timer);
+}
+
+static void l2cap_conn_init_timer(struct l2cap_conn *conn)
+{
+ init_timer(&conn->timer);
+ conn->timer.function = l2cap_conn_timeout;
+ conn->timer.data = (unsigned long)conn;
+}
+
X /* -------- L2CAP connections --------- */
X /* Add new connection to the interface.
X * Interface must be locked
@@ -244,6 +279,8 @@
X spin_lock_init(&conn->lock);
X conn->chan_list.lock = RW_LOCK_UNLOCKED;
X
+ l2cap_conn_init_timer(conn);
+
X __l2cap_conn_link(iff, conn);
X
X DBG("%s -> %s, %p", batostr(src), batostr(dst), conn);
@@ -262,17 +299,18 @@
X
X DBG("conn %p, state %d, err %d", conn, conn->state, err);
X
+ l2cap_conn_clear_timer(conn);
X __l2cap_conn_unlink(conn->iff, conn);
X
X conn->state = BT_CLOSED;
X
X if (conn->rx_skb)
- bluez_skb_free(conn->rx_skb);
+ kfree_skb(conn->rx_skb);
X
X /* Kill channels */
X while ((sk = conn->chan_list.head)) {
X bh_lock_sock(sk);
- l2cap_clear_timer(sk);
+ l2cap_sock_clear_timer(sk);
X l2cap_chan_del(sk, err);
X bh_unlock_sock(sk);
X
@@ -285,11 +323,11 @@
X return 0;
X }
X
-static __inline__ struct l2cap_conn *l2cap_get_conn_by_addr(struct l2cap_iff *iff, bdaddr_t *dst)
+static inline struct l2cap_conn *l2cap_get_conn_by_addr(struct l2cap_iff *iff, bdaddr_t *dst)
X {
X struct list_head *p;
X
- list_for_each(p, &iff->conn_list){
+ list_for_each(p, &iff->conn_list) {
X struct l2cap_conn *c;
X
X c = list_entry(p, struct l2cap_conn, list);
@@ -337,7 +375,7 @@
X l2cap_chan_add(conn, sk, NULL);
X
X sk->state = BT_CONNECT;
- l2cap_set_timer(sk, sk->sndtimeo);
+ l2cap_sock_set_timer(sk, sk->sndtimeo);
X
X switch (conn->state) {
X case BT_CONNECTED:
@@ -347,8 +385,8 @@
X req.psm = l2cap_pi(sk)->psm;
X l2cap_send_req(conn, L2CAP_CONN_REQ, L2CAP_CONN_REQ_SIZE, &req);
X } else {
+ l2cap_sock_clear_timer(sk);
X sk->state = BT_CONNECTED;
- l2cap_clear_timer(sk);
X }
X break;
X
@@ -364,7 +402,6 @@
X
X done:
X read_unlock_bh(&l2cap_rt_lock);
-
X return err;
X }
X
@@ -425,7 +462,7 @@
X struct l2cap_accept_q *q = &l2cap_pi(parent)->accept_q;
X struct sock *sk;
X
- for (sk = q->head; sk; sk = l2cap_pi(sk)->next_q){
+ for (sk = q->head; sk; sk = l2cap_pi(sk)->next_q) {
X if (!state || sk->state == state) {
X l2cap_accept_unlink(sk);
X break;
@@ -490,8 +527,8 @@
X {
X DBG("sk %p", sk);
X
- bluez_skb_queue_purge(&sk->receive_queue);
- bluez_skb_queue_purge(&sk->write_queue);
+ skb_queue_purge(&sk->receive_queue);
+ skb_queue_purge(&sk->write_queue);
X
X MOD_DEC_USE_COUNT;
X }
@@ -533,6 +570,8 @@
X {
X struct l2cap_conn *conn;
X
+ l2cap_sock_clear_timer(sk);
+
X lock_sock(sk);
X
X conn = l2cap_pi(sk)->conn;
@@ -555,7 +594,7 @@
X req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
X l2cap_send_req(conn, L2CAP_DISCONN_REQ, L2CAP_DISCONN_REQ_SIZE, &req);
X
- l2cap_set_timer(sk, sk->sndtimeo);
+ l2cap_sock_set_timer(sk, sk->sndtimeo);
X } else {
X l2cap_chan_del(sk, ECONNRESET);
X }
@@ -614,7 +653,7 @@
X sk->protocol = proto;
X sk->state = BT_OPEN;
X
- l2cap_init_timer(sk);
+ l2cap_sock_init_timer(sk);
X
X bluez_sock_link(&l2cap_sk_list, sk);
X
@@ -689,6 +728,8 @@
X long timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
X int err = 0;
X
+ DBG("sk %p", sk);
+
X add_wait_queue(sk->sleep, &wait);
X current->state = TASK_INTERRUPTIBLE;
X
@@ -951,7 +992,6 @@
X };
X
X release_sock(sk);
-
X return err;
X }
X
@@ -959,11 +999,14 @@
X {
X struct sock *sk = sock->sk;
X struct l2cap_options opts;
- int len;
+ struct l2cap_conninfo cinfo;
+ int len, err = 0;
X
X if (get_user(len, optlen))
X return -EFAULT;
X
+ lock_sock(sk);
+
X switch (optname) {
X case L2CAP_OPTIONS:
X opts.imtu = l2cap_pi(sk)->imtu;
@@ -972,16 +1015,63 @@
X
X len = MIN(len, sizeof(opts));
X if (copy_to_user(optval, (char *)&opts, len))
- return -EFAULT;
+ err = -EFAULT;
+
+ break;
+
+ case L2CAP_CONNINFO:
+ if (sk->state != BT_CONNECTED) {
+ err = -ENOTCONN;
+ break;
+ }
+
+ cinfo.hci_handle = l2cap_pi(sk)->conn->hconn->handle;
+
+ len = MIN(len, sizeof(cinfo));
+ if (copy_to_user(optval, (char *)&cinfo, len))
+ err = -EFAULT;
X
X break;
X
X default:
- return -ENOPROTOOPT;
+ err = -ENOPROTOOPT;
X break;
X };
X
- return 0;
+ release_sock(sk);
+ return err;
+}
+
+static unsigned int l2cap_sock_poll(struct file * file, struct socket *sock, poll_table *wait)
+{
+ struct sock *sk = sock->sk;
+ struct l2cap_accept_q *aq;
+ unsigned int mask;
+
+ DBG("sock %p, sk %p", sock, sk);
+
+ poll_wait(file, sk->sleep, wait);
+ mask = 0;
+
+ if (sk->err || !skb_queue_empty(&sk->error_queue))
+ mask |= POLLERR;
+
+ if (sk->shutdown == SHUTDOWN_MASK)
+ mask |= POLLHUP;
+
+ aq = &l2cap_pi(sk)->accept_q;
+ if (!skb_queue_empty(&sk->receive_queue) || aq->head || (sk->shutdown & RCV_SHUTDOWN))
+ mask |= POLLIN | POLLRDNORM;
+
+ if (sk->state == BT_CLOSED)
+ mask |= POLLHUP;
SHAR_EOF
true || echo 'restore of patch-2.4.10 failed'
fi
echo 'End of part 194'
echo 'File patch-2.4.10 is continued in part 195'
echo "195" > _shar_seq_.tmp
exit 0