Here's a script that gets as close as possible to what you describe. It uses a psycopg2 server-side cursor, queries for 300 rows, and fetches in groups of 10 at a time, cutting off the result mid-stream after 157 rows (just past the 15th batch). It doesn't show any problem, but I'm not sure what else you have going on that triggers it, so see if you can modify this program to illustrate the problem.
from sqlalchemy import Column
from sqlalchemy import create_engine
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy.orm import declarative_base
from sqlalchemy.orm import Session
Base = declarative_base()
class A(Base):
    __tablename__ = 'a'
    id = Column(Integer, primary_key=True)
    data = Column(String)
e = create_engine("postgresql://scott:tiger@localhost/test", echo=True)
Base.metadata.drop_all(e)
Base.metadata.create_all(e)
s = Session(e)
s.add_all([A(data=f"{i}") for i in range(300)])
s.commit()
# SS cursor, will fetch every 10 rows
x = iter(s.query(A).yield_per(10))
# stop iterating partway through the result, leaving the
# server-side cursor un-exhausted
for i in range(157):
    o = next(x)
s.commit()
# drop the iterator without exhausting it, then force a GC pass
# to collect the underlying server-side cursor
del x
import gc
gc.collect()
print("done")