From d201868a65fe93e00846f6bd82e58a3057745fc7 Mon Sep 17 00:00:00 2001 From: Elvis Pranskevichus Date: Sun, 26 Aug 2018 16:49:28 -0400 Subject: [PATCH] Suggest copy_records_to_table() for the purpose of bulk insert. Connection.executemany() is not the fastest choice for bulk insert, copy_records_to_table() is a better choice, so make it easier to find by putting a note in executemany() documentation. See #346 for an example of performance confusion. --- asyncpg/connection.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/asyncpg/connection.py b/asyncpg/connection.py index 8f551a02..99baefde 100644 --- a/asyncpg/connection.py +++ b/asyncpg/connection.py @@ -291,6 +291,12 @@ async def executemany(self, command: str, args, *, timeout: float=None): :param float timeout: Optional timeout value in seconds. :return None: This method discards the results of the operations. + .. note:: + + When inserting a large number of rows, + use :meth:`Connection.copy_records_to_table()` instead, + as it is much more efficient for this purpose. + .. versionadded:: 0.7.0 .. versionchanged:: 0.11.0