@@ -139,25 +139,29 @@ def _bulk_retrieve_from_salesforce(
 
     query = _build_bulk_query(sf_client, sf_type, time_filter)
 
-    bulk_2_handler = SFBulk2Handler(
+    bulk_2_handler: SFBulk2Handler | None = SFBulk2Handler(
         session_id=sf_client.session_id,
         bulk2_url=sf_client.bulk2_url,
         proxies=sf_client.proxies,
         session=sf_client.session,
     )
+    if not bulk_2_handler:
+        return sf_type, None
 
     # NOTE(rkuo): there are signs this download is allocating large
     # amounts of memory instead of streaming the results to disk.
     # we're doing a gc.collect to try and mitigate this.
 
     # see https://github.com/simple-salesforce/simple-salesforce/issues/428 for a
     # possible solution
-    bulk_2_type = SFBulk2Type(
+    bulk_2_type: SFBulk2Type | None = SFBulk2Type(
         object_name=sf_type,
         bulk2_url=bulk_2_handler.bulk2_url,
         headers=bulk_2_handler.headers,
         session=bulk_2_handler.session,
     )
+    if not bulk_2_type:
+        return sf_type, None
 
     logger.info(f"Downloading {sf_type}")
@@ -168,7 +172,7 @@ def _bulk_retrieve_from_salesforce(
     results = bulk_2_type.download(
         query=query,
         path=target_dir,
-        max_records=1000000,
+        max_records=500000,
     )
 
     # prepend each downloaded csv with the object type (delimiter = '.')
@@ -187,6 +191,8 @@ def _bulk_retrieve_from_salesforce(
         logger.warning(f"Exceptioning query for object type {sf_type}: {query}")
         return sf_type, None
     finally:
+        bulk_2_handler = None
+        bulk_2_type = None
         gc.collect()
 
     logger.info(f"Downloaded {sf_type} to {all_download_paths}")
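
The last hunk's pattern of rebinding the handler and type locals to None inside finally before gc.collect() only helps if those names hold the last references to the objects. Below is a minimal standalone sketch of that idea; _Downloader, download_with_cleanup, and the 50 MB buffer are hypothetical stand-ins for SFBulk2Type and its CSV payload, not part of the connector:

import gc


class _Downloader:
    # Hypothetical stand-in for SFBulk2Type: holds a large payload
    # in memory instead of streaming it to disk.
    def download(self, query: str, path: str) -> str:
        self._buffer = b"x" * (50 * 1024 * 1024)
        return path


def download_with_cleanup(query: str, target_dir: str) -> str | None:
    # Annotated as optional so the name can be rebound to None in
    # `finally`, matching the pattern in the diff above.
    downloader: _Downloader | None = _Downloader()
    result: str | None = None
    try:
        result = downloader.download(query=query, path=target_dir)
    except Exception:
        return None
    finally:
        # Rebinding to None drops the local reference, so the collect
        # below can reclaim the buffer now rather than whenever the
        # frame and its locals are eventually released.
        downloader = None
        gc.collect()
    # ... later work in the function runs without the buffer pinned ...
    return result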