debug
#syz test: upstream c0ecd6388360
diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c
index a688d4c75d99..533738844f0c 100644
--- a/fs/netfs/buffered_read.c
+++ b/fs/netfs/buffered_read.c
@@ -110,6 +110,7 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
if (wback_to_cache && !WARN_ON_ONCE(folio_get_private(folio) != NULL)) {
trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
folio_attach_private(folio, NETFS_FOLIO_COPY_TO_CACHE);
+ printk("ino: %lx, folio: %p, %s\n", folio->mapping->host->i_ino, folio, __func__);
filemap_dirty_folio(folio->mapping, folio);
}
}
diff --git a/fs/netfs/buffered_write.c b/fs/netfs/buffered_write.c
index 4726c315453c..8e4804f24f06 100644
--- a/fs/netfs/buffered_write.c
+++ b/fs/netfs/buffered_write.c
@@ -392,6 +392,7 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
folio_mark_dirty(folio);
folio_unlock(folio);
} else {
+ printk("ino: %lx, folio: %p, %s\n", wreq->inode->i_ino, folio, __func__);
netfs_advance_writethrough(wreq, &wbc, folio, copied,
offset + copied == flen,
&writethrough);
diff --git a/fs/netfs/misc.c b/fs/netfs/misc.c
index 83e644bd518f..cb4b16c8a129 100644
--- a/fs/netfs/misc.c
+++ b/fs/netfs/misc.c
@@ -28,6 +28,7 @@ bool netfs_dirty_folio(struct address_space *mapping, struct folio *folio)
_enter("");
+	printk("ino: %lx, folio: %p, %s\n", mapping->host->i_ino, folio, __func__);
if (!filemap_dirty_folio(mapping, folio))
return false;
if (!fscache_cookie_valid(cookie))
diff --git a/fs/netfs/write_issue.c b/fs/netfs/write_issue.c
index 9258d30cffe3..0f3c314d27d3 100644
--- a/fs/netfs/write_issue.c
+++ b/fs/netfs/write_issue.c
@@ -35,6 +35,7 @@
#include <linux/mm.h>
#include <linux/pagemap.h>
#include "internal.h"
+#include <linux/backing-dev.h>
/*
* Kill all dirty folios in the event of an unrecoverable error, starting with
@@ -74,6 +75,7 @@ static void netfs_kill_dirty_pages(struct address_space *mapping,
trace_netfs_folio(folio, why);
+ printk("ino: %lx, folio: %p, %s\n", mapping->host->i_ino, folio, __func__);
folio_start_writeback(folio);
folio_unlock(folio);
folio_end_writeback(folio);
@@ -331,6 +333,7 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
if (fpos >= i_size) {
/* mmap beyond eof. */
_debug("beyond eof");
+ printk("1 ino: %lx, folio: %p, %s\n", wreq->inode->i_ino, folio, __func__);
folio_start_writeback(folio);
folio_unlock(folio);
wreq->nr_group_rel += netfs_folio_written_back(folio);
@@ -403,8 +406,10 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
* from write-through, then the page has already been put into the wb
* state.
*/
- if (wreq->origin == NETFS_WRITEBACK)
+ if (wreq->origin == NETFS_WRITEBACK) {
+ printk("2 ino: %lx, folio: %p, %s\n", wreq->inode->i_ino, folio, __func__);
folio_start_writeback(folio);
+ }
folio_unlock(folio);
if (fgroup == NETFS_FOLIO_COPY_TO_CACHE) {
@@ -503,6 +508,15 @@ int netfs_writepages(struct address_space *mapping,
struct folio *folio;
int error = 0;
+	if (!mapping_can_writeback(mapping) ||
+	    !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
+		printk("ino %lx can not wb: %d or mapping tagged :%d, %s\n",
+		       mapping->host->i_ino, mapping_can_writeback(mapping),
+		       mapping_tagged(mapping, PAGECACHE_TAG_DIRTY),
+		       __func__);
+		return 0;
+	}
+
if (wbc->sync_mode == WB_SYNC_ALL)
mutex_lock(&ictx->wb_lock);
else if (!mutex_trylock(&ictx->wb_lock))
@@ -522,6 +536,8 @@ int netfs_writepages(struct address_space *mapping,
trace_netfs_write(wreq, netfs_write_trace_writeback);
netfs_stat(&netfs_n_wh_writepages);
+	printk("ino state: %lu, ino: %lx, comm: %s, folio: %p, %s\n", wreq->inode->i_state,
+	       wreq->inode->i_ino, current->comm, folio, __func__);
do {
_debug("wbiter %lx %llx", folio->index, wreq->start + wreq->submitted);
@@ -551,6 +567,7 @@ int netfs_writepages(struct address_space *mapping,
return error;
couldnt_start:
+ printk("ino: %lx, folio: %p, error: %d, %s\n", mapping->host->i_ino, folio, error, __func__);
netfs_kill_dirty_pages(mapping, wbc, folio);
out:
mutex_unlock(&ictx->wb_lock);
@@ -600,6 +617,7 @@ int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_c
folio_clear_dirty_for_io(folio);
/* We can make multiple writes to the folio... */
+ printk("ino: %lx, folio: %p, %s\n", wreq->inode->i_ino, folio, __func__);
folio_start_writeback(folio);
if (wreq->len == 0)
trace_netfs_folio(folio, netfs_folio_trace_wthru);
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index a97ceb105cd8..7768cc70439d 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -34,6 +34,7 @@ static void v9fs_begin_writeback(struct netfs_io_request *wreq)
{
struct p9_fid *fid;
+ printk("ino: %lx, %s\n", wreq->inode->i_ino, __func__);
fid = v9fs_fid_find_inode(wreq->inode, true, INVALID_UID, true);
if (!fid) {
WARN_ONCE(1, "folio expected an open fid inode->i_ino=%lx\n",
diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c
index e0d34e4e9076..84c3d83439d9 100644
--- a/fs/9p/vfs_dir.c
+++ b/fs/9p/vfs_dir.c
@@ -215,12 +215,18 @@ int v9fs_dir_release(struct inode *inode, struct file *filp)
inode, filp, fid ? fid->fid : -1);
if (fid) {
- if ((S_ISREG(inode->i_mode)) && (filp->f_mode & FMODE_WRITE))
- retval = filemap_fdatawrite(inode->i_mapping);
+ if ((S_ISREG(inode->i_mode)) && (filp->f_mode & FMODE_WRITE)) {
+ printk("ino: %lx, comm: %s, %s\n", inode->i_ino, current->comm, __func__);
+ if (!mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK))
+ retval = filemap_fdatawrite(inode->i_mapping);
+ }
+ printk("del, ino: %lx, ino state: %lu, comm: %s, fid refcount: %d, %s\n", inode->i_ino, inode->i_state, current->comm, refcount_read(&fid->count), __func__);
- spin_lock(&inode->i_lock);
- hlist_del(&fid->ilist);
- spin_unlock(&inode->i_lock);
+ if (refcount_read(&fid->count) == 1) {
+ spin_lock(&inode->i_lock);
+ hlist_del(&fid->ilist);
+ spin_unlock(&inode->i_lock);
+ }
put_err = p9_fid_put(fid);
retval = retval < 0 ? retval : put_err;
}
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 348cc90bf9c5..5b2a77bf1e5e 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -44,6 +44,7 @@ int v9fs_file_open(struct inode *inode, struct file *file)
struct p9_fid *fid;
int omode;
+ pr_info("ino: %lx, %s\n", inode->i_ino, __func__);
p9_debug(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, file);
v9ses = v9fs_inode2v9ses(inode);
if (v9fs_proto_dotl(v9ses))
@@ -461,6 +462,7 @@ v9fs_file_mmap(struct file *filp, struct vm_area_struct *vma)
struct v9fs_session_info *v9ses = v9fs_inode2v9ses(inode);
p9_debug(P9_DEBUG_MMAP, "filp :%p\n", filp);
+ pr_info("ino: %lx, comm: %s, %s\n", inode->i_ino, current->comm, __func__);
if (!(v9ses->cache & CACHE_WRITEBACK)) {
p9_debug(P9_DEBUG_CACHE, "(read-only mmap mode)");
diff --git a/mm/filemap.c b/mm/filemap.c
index d62150418b91..5112cf69bce2 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -394,6 +394,7 @@ int filemap_fdatawrite_wbc(struct address_space *mapping,
return 0;
wbc_attach_fdatawrite_inode(wbc, mapping->host);
+ printk("ino: %lx, comm: %s, %s\n", mapping->host->i_ino, current->comm, __func__);
ret = do_writepages(mapping, wbc);
wbc_detach_inode(wbc);
return ret;
@@ -427,17 +428,20 @@ int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
.range_end = end,
};
+ printk("ino: %lx, comm: %s, %s\n", mapping->host->i_ino, current->comm, __func__);
return filemap_fdatawrite_wbc(mapping, &wbc);
}
static inline int __filemap_fdatawrite(struct address_space *mapping,
int sync_mode)
{
+ printk("ino: %lx, comm: %s, %s\n", mapping->host->i_ino, current->comm, __func__);
return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
}
int filemap_fdatawrite(struct address_space *mapping)
{
+ printk("ino: %lx, comm: %s, %s\n", mapping->host->i_ino, current->comm, __func__);
return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite);
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 4430ac68e4c4..043809a4cf9e 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2906,6 +2916,7 @@ bool folio_mark_dirty(struct folio *folio)
*/
if (folio_test_reclaim(folio))
folio_clear_reclaim(folio);
+ printk("ino: %lx, folio: %p, %s\n", mapping->host->i_ino, folio, __func__);
return mapping->a_ops->dirty_folio(mapping, folio);
}
@@ -3148,8 +3159,12 @@ void __folio_start_writeback(struct folio *folio, bool keep_write)
*/
if (mapping->host && !on_wblist)
sb_mark_inode_writeback(mapping->host);
- if (!folio_test_dirty(folio))
+ if (!folio_test_dirty(folio)) {
xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY);
+		printk("ino: %lx, comm: %s, mapping tagged :%d, folio: %p, %s\n",
+		       mapping->host ? mapping->host->i_ino : 0UL,
+		       current->comm, mapping_tagged(mapping, PAGECACHE_TAG_DIRTY), folio, __func__);