persistence.py 54 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460
  1. # orm/persistence.py
  2. # Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
  3. # <see AUTHORS file>
  4. #
  5. # This module is part of SQLAlchemy and is released under
  6. # the MIT License: http://www.opensource.org/licenses/mit-license.php
  7. """private module containing functions used to emit INSERT, UPDATE
  8. and DELETE statements on behalf of a :class:`.Mapper` and its descending
  9. mappers.
  10. The functions here are called only by the unit of work functions
  11. in unitofwork.py.
  12. """
  13. import operator
  14. from itertools import groupby, chain
  15. from .. import sql, util, exc as sa_exc
  16. from . import attributes, sync, exc as orm_exc, evaluator
  17. from .base import state_str, _attr_as_key, _entity_descriptor
  18. from ..sql import expression
  19. from ..sql.base import _from_objects
  20. from . import loading
  21. def _bulk_insert(
  22. mapper, mappings, session_transaction, isstates, return_defaults,
  23. render_nulls):
  24. base_mapper = mapper.base_mapper
  25. cached_connections = _cached_connection_dict(base_mapper)
  26. if session_transaction.session.connection_callable:
  27. raise NotImplementedError(
  28. "connection_callable / per-instance sharding "
  29. "not supported in bulk_insert()")
  30. if isstates:
  31. if return_defaults:
  32. states = [(state, state.dict) for state in mappings]
  33. mappings = [dict_ for (state, dict_) in states]
  34. else:
  35. mappings = [state.dict for state in mappings]
  36. else:
  37. mappings = list(mappings)
  38. connection = session_transaction.connection(base_mapper)
  39. for table, super_mapper in base_mapper._sorted_tables.items():
  40. if not mapper.isa(super_mapper):
  41. continue
  42. records = (
  43. (None, state_dict, params, mapper,
  44. connection, value_params, has_all_pks, has_all_defaults)
  45. for
  46. state, state_dict, params, mp,
  47. conn, value_params, has_all_pks,
  48. has_all_defaults in _collect_insert_commands(table, (
  49. (None, mapping, mapper, connection)
  50. for mapping in mappings),
  51. bulk=True, return_defaults=return_defaults,
  52. render_nulls=render_nulls
  53. )
  54. )
  55. _emit_insert_statements(base_mapper, None,
  56. cached_connections,
  57. super_mapper, table, records,
  58. bookkeeping=return_defaults)
  59. if return_defaults and isstates:
  60. identity_cls = mapper._identity_class
  61. identity_props = [p.key for p in mapper._identity_key_props]
  62. for state, dict_ in states:
  63. state.key = (
  64. identity_cls,
  65. tuple([dict_[key] for key in identity_props])
  66. )
  67. def _bulk_update(mapper, mappings, session_transaction,
  68. isstates, update_changed_only):
  69. base_mapper = mapper.base_mapper
  70. cached_connections = _cached_connection_dict(base_mapper)
  71. search_keys = mapper._primary_key_propkeys
  72. if mapper._version_id_prop:
  73. search_keys = set([mapper._version_id_prop.key]).union(search_keys)
  74. def _changed_dict(mapper, state):
  75. return dict(
  76. (k, v)
  77. for k, v in state.dict.items() if k in state.committed_state or k
  78. in search_keys
  79. )
  80. if isstates:
  81. if update_changed_only:
  82. mappings = [_changed_dict(mapper, state) for state in mappings]
  83. else:
  84. mappings = [state.dict for state in mappings]
  85. else:
  86. mappings = list(mappings)
  87. if session_transaction.session.connection_callable:
  88. raise NotImplementedError(
  89. "connection_callable / per-instance sharding "
  90. "not supported in bulk_update()")
  91. connection = session_transaction.connection(base_mapper)
  92. for table, super_mapper in base_mapper._sorted_tables.items():
  93. if not mapper.isa(super_mapper):
  94. continue
  95. records = _collect_update_commands(None, table, (
  96. (None, mapping, mapper, connection,
  97. (mapping[mapper._version_id_prop.key]
  98. if mapper._version_id_prop else None))
  99. for mapping in mappings
  100. ), bulk=True)
  101. _emit_update_statements(base_mapper, None,
  102. cached_connections,
  103. super_mapper, table, records,
  104. bookkeeping=False)
  105. def save_obj(
  106. base_mapper, states, uowtransaction, single=False):
  107. """Issue ``INSERT`` and/or ``UPDATE`` statements for a list
  108. of objects.
  109. This is called within the context of a UOWTransaction during a
  110. flush operation, given a list of states to be flushed. The
  111. base mapper in an inheritance hierarchy handles the inserts/
  112. updates for all descendant mappers.
  113. """
  114. # if batch=false, call _save_obj separately for each object
  115. if not single and not base_mapper.batch:
  116. for state in _sort_states(states):
  117. save_obj(base_mapper, [state], uowtransaction, single=True)
  118. return
  119. states_to_update = []
  120. states_to_insert = []
  121. cached_connections = _cached_connection_dict(base_mapper)
  122. for (state, dict_, mapper, connection,
  123. has_identity,
  124. row_switch, update_version_id) in _organize_states_for_save(
  125. base_mapper, states, uowtransaction
  126. ):
  127. if has_identity or row_switch:
  128. states_to_update.append(
  129. (state, dict_, mapper, connection, update_version_id)
  130. )
  131. else:
  132. states_to_insert.append(
  133. (state, dict_, mapper, connection)
  134. )
  135. for table, mapper in base_mapper._sorted_tables.items():
  136. if table not in mapper._pks_by_table:
  137. continue
  138. insert = _collect_insert_commands(table, states_to_insert)
  139. update = _collect_update_commands(
  140. uowtransaction, table, states_to_update)
  141. _emit_update_statements(base_mapper, uowtransaction,
  142. cached_connections,
  143. mapper, table, update)
  144. _emit_insert_statements(base_mapper, uowtransaction,
  145. cached_connections,
  146. mapper, table, insert)
  147. _finalize_insert_update_commands(
  148. base_mapper, uowtransaction,
  149. chain(
  150. (
  151. (state, state_dict, mapper, connection, False)
  152. for state, state_dict, mapper, connection in states_to_insert
  153. ),
  154. (
  155. (state, state_dict, mapper, connection, True)
  156. for state, state_dict, mapper, connection,
  157. update_version_id in states_to_update
  158. )
  159. )
  160. )
  161. def post_update(base_mapper, states, uowtransaction, post_update_cols):
  162. """Issue UPDATE statements on behalf of a relationship() which
  163. specifies post_update.
  164. """
  165. cached_connections = _cached_connection_dict(base_mapper)
  166. states_to_update = list(_organize_states_for_post_update(
  167. base_mapper,
  168. states, uowtransaction))
  169. for table, mapper in base_mapper._sorted_tables.items():
  170. if table not in mapper._pks_by_table:
  171. continue
  172. update = (
  173. (state, state_dict, sub_mapper, connection)
  174. for
  175. state, state_dict, sub_mapper, connection in states_to_update
  176. if table in sub_mapper._pks_by_table
  177. )
  178. update = _collect_post_update_commands(base_mapper, uowtransaction,
  179. table, update,
  180. post_update_cols)
  181. _emit_post_update_statements(base_mapper, uowtransaction,
  182. cached_connections,
  183. mapper, table, update)
  184. def delete_obj(base_mapper, states, uowtransaction):
  185. """Issue ``DELETE`` statements for a list of objects.
  186. This is called within the context of a UOWTransaction during a
  187. flush operation.
  188. """
  189. cached_connections = _cached_connection_dict(base_mapper)
  190. states_to_delete = list(_organize_states_for_delete(
  191. base_mapper,
  192. states,
  193. uowtransaction))
  194. table_to_mapper = base_mapper._sorted_tables
  195. for table in reversed(list(table_to_mapper.keys())):
  196. mapper = table_to_mapper[table]
  197. if table not in mapper._pks_by_table:
  198. continue
  199. elif mapper.inherits and mapper.passive_deletes:
  200. continue
  201. delete = _collect_delete_commands(base_mapper, uowtransaction,
  202. table, states_to_delete)
  203. _emit_delete_statements(base_mapper, uowtransaction,
  204. cached_connections, mapper, table, delete)
  205. for state, state_dict, mapper, connection, \
  206. update_version_id in states_to_delete:
  207. mapper.dispatch.after_delete(mapper, connection, state)
  208. def _organize_states_for_save(base_mapper, states, uowtransaction):
  209. """Make an initial pass across a set of states for INSERT or
  210. UPDATE.
  211. This includes splitting out into distinct lists for
  212. each, calling before_insert/before_update, obtaining
  213. key information for each state including its dictionary,
  214. mapper, the connection to use for the execution per state,
  215. and the identity flag.
  216. """
  217. for state, dict_, mapper, connection in _connections_for_states(
  218. base_mapper, uowtransaction,
  219. states):
  220. has_identity = bool(state.key)
  221. instance_key = state.key or mapper._identity_key_from_state(state)
  222. row_switch = update_version_id = None
  223. # call before_XXX extensions
  224. if not has_identity:
  225. mapper.dispatch.before_insert(mapper, connection, state)
  226. else:
  227. mapper.dispatch.before_update(mapper, connection, state)
  228. if mapper._validate_polymorphic_identity:
  229. mapper._validate_polymorphic_identity(mapper, state, dict_)
  230. # detect if we have a "pending" instance (i.e. has
  231. # no instance_key attached to it), and another instance
  232. # with the same identity key already exists as persistent.
  233. # convert to an UPDATE if so.
  234. if not has_identity and \
  235. instance_key in uowtransaction.session.identity_map:
  236. instance = \
  237. uowtransaction.session.identity_map[instance_key]
  238. existing = attributes.instance_state(instance)
  239. if not uowtransaction.was_already_deleted(existing):
  240. if not uowtransaction.is_deleted(existing):
  241. raise orm_exc.FlushError(
  242. "New instance %s with identity key %s conflicts "
  243. "with persistent instance %s" %
  244. (state_str(state), instance_key,
  245. state_str(existing)))
  246. base_mapper._log_debug(
  247. "detected row switch for identity %s. "
  248. "will update %s, remove %s from "
  249. "transaction", instance_key,
  250. state_str(state), state_str(existing))
  251. # remove the "delete" flag from the existing element
  252. uowtransaction.remove_state_actions(existing)
  253. row_switch = existing
  254. if (has_identity or row_switch) and mapper.version_id_col is not None:
  255. update_version_id = mapper._get_committed_state_attr_by_column(
  256. row_switch if row_switch else state,
  257. row_switch.dict if row_switch else dict_,
  258. mapper.version_id_col)
  259. yield (state, dict_, mapper, connection,
  260. has_identity, row_switch, update_version_id)
  261. def _organize_states_for_post_update(base_mapper, states,
  262. uowtransaction):
  263. """Make an initial pass across a set of states for UPDATE
  264. corresponding to post_update.
  265. This includes obtaining key information for each state
  266. including its dictionary, mapper, the connection to use for
  267. the execution per state.
  268. """
  269. return _connections_for_states(base_mapper, uowtransaction, states)
  270. def _organize_states_for_delete(base_mapper, states, uowtransaction):
  271. """Make an initial pass across a set of states for DELETE.
  272. This includes calling out before_delete and obtaining
  273. key information for each state including its dictionary,
  274. mapper, the connection to use for the execution per state.
  275. """
  276. for state, dict_, mapper, connection in _connections_for_states(
  277. base_mapper, uowtransaction,
  278. states):
  279. mapper.dispatch.before_delete(mapper, connection, state)
  280. if mapper.version_id_col is not None:
  281. update_version_id = \
  282. mapper._get_committed_state_attr_by_column(
  283. state, dict_,
  284. mapper.version_id_col)
  285. else:
  286. update_version_id = None
  287. yield (
  288. state, dict_, mapper, connection, update_version_id)
  289. def _collect_insert_commands(
  290. table, states_to_insert,
  291. bulk=False, return_defaults=False, render_nulls=False):
  292. """Identify sets of values to use in INSERT statements for a
  293. list of states.
  294. """
  295. for state, state_dict, mapper, connection in states_to_insert:
  296. if table not in mapper._pks_by_table:
  297. continue
  298. params = {}
  299. value_params = {}
  300. propkey_to_col = mapper._propkey_to_col[table]
  301. eval_none = mapper._insert_cols_evaluating_none[table]
  302. for propkey in set(propkey_to_col).intersection(state_dict):
  303. value = state_dict[propkey]
  304. col = propkey_to_col[propkey]
  305. if value is None and propkey not in eval_none and not render_nulls:
  306. continue
  307. elif not bulk and hasattr(value, '__clause_element__') or \
  308. isinstance(value, sql.ClauseElement):
  309. value_params[col.key] = value.__clause_element__() \
  310. if hasattr(value, '__clause_element__') else value
  311. else:
  312. params[col.key] = value
  313. if not bulk:
  314. # for all the columns that have no default and we don't have
  315. # a value and where "None" is not a special value, add
  316. # explicit None to the INSERT. This is a legacy behavior
  317. # which might be worth removing, as it should not be necessary
  318. # and also produces confusion, given that "missing" and None
  319. # now have distinct meanings
  320. for colkey in mapper._insert_cols_as_none[table].\
  321. difference(params).difference(value_params):
  322. params[colkey] = None
  323. if not bulk or return_defaults:
  324. has_all_pks = mapper._pk_keys_by_table[table].issubset(params)
  325. if mapper.base_mapper.eager_defaults:
  326. has_all_defaults = mapper._server_default_cols[table].\
  327. issubset(params)
  328. else:
  329. has_all_defaults = True
  330. else:
  331. has_all_defaults = has_all_pks = True
  332. if mapper.version_id_generator is not False \
  333. and mapper.version_id_col is not None and \
  334. mapper.version_id_col in mapper._cols_by_table[table]:
  335. params[mapper.version_id_col.key] = \
  336. mapper.version_id_generator(None)
  337. yield (
  338. state, state_dict, params, mapper,
  339. connection, value_params, has_all_pks,
  340. has_all_defaults)
  341. def _collect_update_commands(
  342. uowtransaction, table, states_to_update,
  343. bulk=False):
  344. """Identify sets of values to use in UPDATE statements for a
  345. list of states.
  346. This function works intricately with the history system
  347. to determine exactly what values should be updated
  348. as well as how the row should be matched within an UPDATE
  349. statement. Includes some tricky scenarios where the primary
  350. key of an object might have been changed.
  351. """
  352. for state, state_dict, mapper, connection, \
  353. update_version_id in states_to_update:
  354. if table not in mapper._pks_by_table:
  355. continue
  356. pks = mapper._pks_by_table[table]
  357. value_params = {}
  358. propkey_to_col = mapper._propkey_to_col[table]
  359. if bulk:
  360. params = dict(
  361. (propkey_to_col[propkey].key, state_dict[propkey])
  362. for propkey in
  363. set(propkey_to_col).intersection(state_dict).difference(
  364. mapper._pk_keys_by_table[table])
  365. )
  366. has_all_defaults = True
  367. else:
  368. params = {}
  369. for propkey in set(propkey_to_col).intersection(
  370. state.committed_state):
  371. value = state_dict[propkey]
  372. col = propkey_to_col[propkey]
  373. if hasattr(value, '__clause_element__') or \
  374. isinstance(value, sql.ClauseElement):
  375. value_params[col] = value.__clause_element__() \
  376. if hasattr(value, '__clause_element__') else value
  377. # guard against values that generate non-__nonzero__
  378. # objects for __eq__()
  379. elif state.manager[propkey].impl.is_equal(
  380. value, state.committed_state[propkey]) is not True:
  381. params[col.key] = value
  382. if mapper.base_mapper.eager_defaults:
  383. has_all_defaults = mapper._server_onupdate_default_cols[table].\
  384. issubset(params)
  385. else:
  386. has_all_defaults = True
  387. if update_version_id is not None and \
  388. mapper.version_id_col in mapper._cols_by_table[table]:
  389. if not bulk and not (params or value_params):
  390. # HACK: check for history in other tables, in case the
  391. # history is only in a different table than the one
  392. # where the version_id_col is. This logic was lost
  393. # from 0.9 -> 1.0.0 and restored in 1.0.6.
  394. for prop in mapper._columntoproperty.values():
  395. history = (
  396. state.manager[prop.key].impl.get_history(
  397. state, state_dict,
  398. attributes.PASSIVE_NO_INITIALIZE))
  399. if history.added:
  400. break
  401. else:
  402. # no net change, break
  403. continue
  404. col = mapper.version_id_col
  405. params[col._label] = update_version_id
  406. if (bulk or col.key not in params) and \
  407. mapper.version_id_generator is not False:
  408. val = mapper.version_id_generator(update_version_id)
  409. params[col.key] = val
  410. elif not (params or value_params):
  411. continue
  412. has_all_pks = True
  413. if bulk:
  414. pk_params = dict(
  415. (propkey_to_col[propkey]._label, state_dict.get(propkey))
  416. for propkey in
  417. set(propkey_to_col).
  418. intersection(mapper._pk_attr_keys_by_table[table])
  419. )
  420. else:
  421. pk_params = {}
  422. for col in pks:
  423. propkey = mapper._columntoproperty[col].key
  424. history = state.manager[propkey].impl.get_history(
  425. state, state_dict, attributes.PASSIVE_OFF)
  426. if history.added:
  427. if not history.deleted or \
  428. ("pk_cascaded", state, col) in \
  429. uowtransaction.attributes:
  430. pk_params[col._label] = history.added[0]
  431. params.pop(col.key, None)
  432. else:
  433. # else, use the old value to locate the row
  434. pk_params[col._label] = history.deleted[0]
  435. if col in value_params:
  436. has_all_pks = False
  437. else:
  438. pk_params[col._label] = history.unchanged[0]
  439. if pk_params[col._label] is None:
  440. raise orm_exc.FlushError(
  441. "Can't update table %s using NULL for primary "
  442. "key value on column %s" % (table, col))
  443. if params or value_params:
  444. params.update(pk_params)
  445. yield (
  446. state, state_dict, params, mapper,
  447. connection, value_params, has_all_defaults, has_all_pks)
  448. def _collect_post_update_commands(base_mapper, uowtransaction, table,
  449. states_to_update, post_update_cols):
  450. """Identify sets of values to use in UPDATE statements for a
  451. list of states within a post_update operation.
  452. """
  453. for state, state_dict, mapper, connection in states_to_update:
  454. # assert table in mapper._pks_by_table
  455. pks = mapper._pks_by_table[table]
  456. params = {}
  457. hasdata = False
  458. for col in mapper._cols_by_table[table]:
  459. if col in pks:
  460. params[col._label] = \
  461. mapper._get_state_attr_by_column(
  462. state,
  463. state_dict, col, passive=attributes.PASSIVE_OFF)
  464. elif col in post_update_cols:
  465. prop = mapper._columntoproperty[col]
  466. history = state.manager[prop.key].impl.get_history(
  467. state, state_dict,
  468. attributes.PASSIVE_NO_INITIALIZE)
  469. if history.added:
  470. value = history.added[0]
  471. params[col.key] = value
  472. hasdata = True
  473. if hasdata:
  474. yield params, connection
  475. def _collect_delete_commands(base_mapper, uowtransaction, table,
  476. states_to_delete):
  477. """Identify values to use in DELETE statements for a list of
  478. states to be deleted."""
  479. for state, state_dict, mapper, connection, \
  480. update_version_id in states_to_delete:
  481. if table not in mapper._pks_by_table:
  482. continue
  483. params = {}
  484. for col in mapper._pks_by_table[table]:
  485. params[col.key] = \
  486. value = \
  487. mapper._get_committed_state_attr_by_column(
  488. state, state_dict, col)
  489. if value is None:
  490. raise orm_exc.FlushError(
  491. "Can't delete from table %s "
  492. "using NULL for primary "
  493. "key value on column %s" % (table, col))
  494. if update_version_id is not None and \
  495. mapper.version_id_col in mapper._cols_by_table[table]:
  496. params[mapper.version_id_col.key] = update_version_id
  497. yield params, connection
  498. def _emit_update_statements(base_mapper, uowtransaction,
  499. cached_connections, mapper, table, update,
  500. bookkeeping=True):
  501. """Emit UPDATE statements corresponding to value lists collected
  502. by _collect_update_commands()."""
  503. needs_version_id = mapper.version_id_col is not None and \
  504. mapper.version_id_col in mapper._cols_by_table[table]
  505. def update_stmt():
  506. clause = sql.and_()
  507. for col in mapper._pks_by_table[table]:
  508. clause.clauses.append(col == sql.bindparam(col._label,
  509. type_=col.type))
  510. if needs_version_id:
  511. clause.clauses.append(
  512. mapper.version_id_col == sql.bindparam(
  513. mapper.version_id_col._label,
  514. type_=mapper.version_id_col.type))
  515. stmt = table.update(clause)
  516. return stmt
  517. cached_stmt = base_mapper._memo(('update', table), update_stmt)
  518. for (connection, paramkeys, hasvalue, has_all_defaults, has_all_pks), \
  519. records in groupby(
  520. update,
  521. lambda rec: (
  522. rec[4], # connection
  523. set(rec[2]), # set of parameter keys
  524. bool(rec[5]), # whether or not we have "value" parameters
  525. rec[6], # has_all_defaults
  526. rec[7] # has all pks
  527. )
  528. ):
  529. rows = 0
  530. records = list(records)
  531. statement = cached_stmt
  532. # TODO: would be super-nice to not have to determine this boolean
  533. # inside the loop here, in the 99.9999% of the time there's only
  534. # one connection in use
  535. assert_singlerow = connection.dialect.supports_sane_rowcount
  536. assert_multirow = assert_singlerow and \
  537. connection.dialect.supports_sane_multi_rowcount
  538. allow_multirow = has_all_defaults and not needs_version_id
  539. if not has_all_pks:
  540. statement = statement.return_defaults()
  541. elif bookkeeping and not has_all_defaults and \
  542. mapper.base_mapper.eager_defaults:
  543. statement = statement.return_defaults()
  544. elif mapper.version_id_col is not None:
  545. statement = statement.return_defaults(mapper.version_id_col)
  546. if hasvalue:
  547. for state, state_dict, params, mapper, \
  548. connection, value_params, \
  549. has_all_defaults, has_all_pks in records:
  550. c = connection.execute(
  551. statement.values(value_params),
  552. params)
  553. if bookkeeping:
  554. _postfetch(
  555. mapper,
  556. uowtransaction,
  557. table,
  558. state,
  559. state_dict,
  560. c,
  561. c.context.compiled_parameters[0],
  562. value_params)
  563. rows += c.rowcount
  564. check_rowcount = True
  565. else:
  566. if not allow_multirow:
  567. check_rowcount = assert_singlerow
  568. for state, state_dict, params, mapper, \
  569. connection, value_params, has_all_defaults, \
  570. has_all_pks in records:
  571. c = cached_connections[connection].\
  572. execute(statement, params)
  573. # TODO: why with bookkeeping=False?
  574. if bookkeeping:
  575. _postfetch(
  576. mapper,
  577. uowtransaction,
  578. table,
  579. state,
  580. state_dict,
  581. c,
  582. c.context.compiled_parameters[0],
  583. value_params)
  584. rows += c.rowcount
  585. else:
  586. multiparams = [rec[2] for rec in records]
  587. check_rowcount = assert_multirow or (
  588. assert_singlerow and
  589. len(multiparams) == 1
  590. )
  591. c = cached_connections[connection].\
  592. execute(statement, multiparams)
  593. rows += c.rowcount
  594. for state, state_dict, params, mapper, \
  595. connection, value_params, \
  596. has_all_defaults, has_all_pks in records:
  597. if bookkeeping:
  598. _postfetch(
  599. mapper,
  600. uowtransaction,
  601. table,
  602. state,
  603. state_dict,
  604. c,
  605. c.context.compiled_parameters[0],
  606. value_params)
  607. if check_rowcount:
  608. if rows != len(records):
  609. raise orm_exc.StaleDataError(
  610. "UPDATE statement on table '%s' expected to "
  611. "update %d row(s); %d were matched." %
  612. (table.description, len(records), rows))
  613. elif needs_version_id:
  614. util.warn("Dialect %s does not support updated rowcount "
  615. "- versioning cannot be verified." %
  616. c.dialect.dialect_description)
  617. def _emit_insert_statements(base_mapper, uowtransaction,
  618. cached_connections, mapper, table, insert,
  619. bookkeeping=True):
  620. """Emit INSERT statements corresponding to value lists collected
  621. by _collect_insert_commands()."""
  622. cached_stmt = base_mapper._memo(('insert', table), table.insert)
  623. for (connection, pkeys, hasvalue, has_all_pks, has_all_defaults), \
  624. records in groupby(
  625. insert,
  626. lambda rec: (
  627. rec[4], # connection
  628. set(rec[2]), # parameter keys
  629. bool(rec[5]), # whether we have "value" parameters
  630. rec[6],
  631. rec[7])):
  632. statement = cached_stmt
  633. if not bookkeeping or \
  634. (
  635. has_all_defaults
  636. or not base_mapper.eager_defaults
  637. or not connection.dialect.implicit_returning
  638. ) and has_all_pks and not hasvalue:
  639. records = list(records)
  640. multiparams = [rec[2] for rec in records]
  641. c = cached_connections[connection].\
  642. execute(statement, multiparams)
  643. if bookkeeping:
  644. for (state, state_dict, params, mapper_rec,
  645. conn, value_params, has_all_pks, has_all_defaults), \
  646. last_inserted_params in \
  647. zip(records, c.context.compiled_parameters):
  648. if state:
  649. _postfetch(
  650. mapper_rec,
  651. uowtransaction,
  652. table,
  653. state,
  654. state_dict,
  655. c,
  656. last_inserted_params,
  657. value_params)
  658. else:
  659. _postfetch_bulk_save(mapper_rec, state_dict, table)
  660. else:
  661. if not has_all_defaults and base_mapper.eager_defaults:
  662. statement = statement.return_defaults()
  663. elif mapper.version_id_col is not None:
  664. statement = statement.return_defaults(mapper.version_id_col)
  665. for state, state_dict, params, mapper_rec, \
  666. connection, value_params, \
  667. has_all_pks, has_all_defaults in records:
  668. if value_params:
  669. result = connection.execute(
  670. statement.values(value_params),
  671. params)
  672. else:
  673. result = cached_connections[connection].\
  674. execute(statement, params)
  675. primary_key = result.context.inserted_primary_key
  676. if primary_key is not None:
  677. # set primary key attributes
  678. for pk, col in zip(primary_key,
  679. mapper._pks_by_table[table]):
  680. prop = mapper_rec._columntoproperty[col]
  681. if state_dict.get(prop.key) is None:
  682. state_dict[prop.key] = pk
  683. if bookkeeping:
  684. if state:
  685. _postfetch(
  686. mapper_rec,
  687. uowtransaction,
  688. table,
  689. state,
  690. state_dict,
  691. result,
  692. result.context.compiled_parameters[0],
  693. value_params)
  694. else:
  695. _postfetch_bulk_save(mapper_rec, state_dict, table)
  696. def _emit_post_update_statements(base_mapper, uowtransaction,
  697. cached_connections, mapper, table, update):
  698. """Emit UPDATE statements corresponding to value lists collected
  699. by _collect_post_update_commands()."""
  700. def update_stmt():
  701. clause = sql.and_()
  702. for col in mapper._pks_by_table[table]:
  703. clause.clauses.append(col == sql.bindparam(col._label,
  704. type_=col.type))
  705. return table.update(clause)
  706. statement = base_mapper._memo(('post_update', table), update_stmt)
  707. # execute each UPDATE in the order according to the original
  708. # list of states to guarantee row access order, but
  709. # also group them into common (connection, cols) sets
  710. # to support executemany().
  711. for key, grouper in groupby(
  712. update, lambda rec: (
  713. rec[1], # connection
  714. set(rec[0]) # parameter keys
  715. )
  716. ):
  717. connection = key[0]
  718. multiparams = [params for params, conn in grouper]
  719. cached_connections[connection].\
  720. execute(statement, multiparams)
def _emit_delete_statements(base_mapper, uowtransaction, cached_connections,
                            mapper, table, delete):
    """Emit DELETE statements corresponding to value lists collected
    by _collect_delete_commands().

    :param base_mapper: base mapper for the flush; owns the statement
     memoization and the ``confirm_deleted_rows`` flag.
    :param cached_connections: dict of connection ->
     connection-with-compiled-cache.
    :param mapper: mapper for the table being deleted from.
    :param table: the Table to emit DELETE against.
    :param delete: sequence of (params, connection) tuples.
    """
    # if the mapper versions rows on this table, the DELETE must also
    # match the expected version id so that stale rows are detected
    need_version_id = mapper.version_id_col is not None and \
        mapper.version_id_col in mapper._cols_by_table[table]

    def delete_stmt():
        # WHERE clause: every primary key column bound by its key,
        # plus the version id column when versioning applies
        clause = sql.and_()
        for col in mapper._pks_by_table[table]:
            clause.clauses.append(
                col == sql.bindparam(col.key, type_=col.type))

        if need_version_id:
            clause.clauses.append(
                mapper.version_id_col ==
                sql.bindparam(
                    mapper.version_id_col.key,
                    type_=mapper.version_id_col.type
                )
            )

        return table.delete(clause)

    # the DELETE construct is memoized per-table on the base mapper
    statement = base_mapper._memo(('delete', table), delete_stmt)

    # group records by connection so each group can use executemany()
    for connection, recs in groupby(
        delete,
        lambda rec: rec[1]  # connection
    ):
        del_objects = [params for params, connection in recs]

        connection = cached_connections[connection]

        expected = len(del_objects)
        rows_matched = -1
        only_warn = False
        if connection.dialect.supports_sane_multi_rowcount:
            c = connection.execute(statement, del_objects)

            # an executemany rowcount without versioning is advisory
            # only; a mismatch will warn rather than raise below
            if not need_version_id:
                only_warn = True

            rows_matched = c.rowcount

        elif need_version_id:
            if connection.dialect.supports_sane_rowcount:
                rows_matched = 0
                # execute deletes individually so that versioned
                # rows can be verified
                for params in del_objects:
                    c = connection.execute(statement, params)
                    rows_matched += c.rowcount
            else:
                util.warn(
                    "Dialect %s does not support deleted rowcount "
                    "- versioning cannot be verified." %
                    connection.dialect.dialect_description,
                    stacklevel=12)
                connection.execute(statement, del_objects)
        else:
            # no usable rowcount and no versioning; rows_matched stays
            # at -1 so no verification occurs below
            connection.execute(statement, del_objects)

        if base_mapper.confirm_deleted_rows and \
                rows_matched > -1 and expected != rows_matched:
            if only_warn:
                util.warn(
                    "DELETE statement on table '%s' expected to "
                    "delete %d row(s); %d were matched. Please set "
                    "confirm_deleted_rows=False within the mapper "
                    "configuration to prevent this warning." %
                    (table.description, expected, rows_matched)
                )
            else:
                raise orm_exc.StaleDataError(
                    "DELETE statement on table '%s' expected to "
                    "delete %d row(s); %d were matched. Please set "
                    "confirm_deleted_rows=False within the mapper "
                    "configuration to prevent this warning." %
                    (table.description, expected, rows_matched)
                )
def _finalize_insert_update_commands(base_mapper, uowtransaction, states):
    """finalize state on states that have been inserted or updated,
    including calling after_insert/after_update events.

    :param states: iterable of
     (state, state_dict, mapper, connection, has_identity) tuples.
    """
    for state, state_dict, mapper, connection, has_identity in states:

        if mapper._readonly_props:
            # expire unmodified readonly attributes so that they are
            # refreshed from the database on next access
            readonly = state.unmodified_intersection(
                [p.key for p in mapper._readonly_props
                 if p.expire_on_flush or p.key not in state.dict]
            )

            if readonly:
                state._expire_attributes(state.dict, readonly)

        # if eager_defaults option is enabled, load
        # all expired cols. Else if we have a version_id_col, make sure
        # it isn't expired.
        toload_now = []

        if base_mapper.eager_defaults:
            toload_now.extend(
                state._unloaded_non_object.intersection(
                    mapper._server_default_plus_onupdate_propkeys)
            )

        if mapper.version_id_col is not None and \
                mapper.version_id_generator is False:
            if mapper._version_id_prop.key in state.unloaded:
                toload_now.extend([mapper._version_id_prop.key])

        if toload_now:
            # emit a SELECT now for the attributes collected above
            state.key = base_mapper._identity_key_from_state(state)
            loading.load_on_ident(
                uowtransaction.session.query(mapper),
                state.key, refresh_state=state,
                only_load_props=toload_now)

        # call after_XXX extensions
        if not has_identity:
            mapper.dispatch.after_insert(mapper, connection, state)
        else:
            mapper.dispatch.after_update(mapper, connection, state)
def _postfetch(mapper, uowtransaction, table,
               state, dict_, result, params, value_params):
    """Expire attributes in need of newly persisted database state,
    after an INSERT or UPDATE statement has proceeded for that
    state.

    Values available from RETURNING or from the executed parameters
    are copied into the state's dict directly; columns computed at the
    database level but not returned are expired instead.
    """
    prefetch_cols = result.context.compiled.prefetch
    postfetch_cols = result.context.compiled.postfetch
    returning_cols = result.context.compiled.returning

    # the version id column's new value is present in the executed
    # parameters, so treat it as a prefetched column
    if mapper.version_id_col is not None and \
            mapper.version_id_col in mapper._cols_by_table[table]:
        prefetch_cols = list(prefetch_cols) + [mapper.version_id_col]

    # accumulate attribute keys for the refresh_flush event only when
    # listeners are registered
    refresh_flush = bool(mapper.class_manager.dispatch.refresh_flush)
    if refresh_flush:
        load_evt_attrs = []

    if returning_cols:
        row = result.context.returned_defaults
        if row is not None:
            for col in returning_cols:
                # pk cols returned from insert are handled
                # distinctly, don't step on the values here
                if col.primary_key and result.context.isinsert:
                    continue

                # note that columns can be in the "return defaults" that are
                # not mapped to this mapper, typically because they are
                # "excluded", which can be specified directly or also occurs
                # when using declarative w/ single table inheritance
                prop = mapper._columntoproperty.get(col)
                if prop:
                    dict_[prop.key] = row[col]
                    if refresh_flush:
                        load_evt_attrs.append(prop.key)

    # prefetched column values are available in the executed
    # parameters; copy them straight into the state dict
    for c in prefetch_cols:
        if c.key in params and c in mapper._columntoproperty:
            dict_[mapper._columntoproperty[c].key] = params[c.key]
            if refresh_flush:
                load_evt_attrs.append(mapper._columntoproperty[c].key)

    if refresh_flush and load_evt_attrs:
        mapper.class_manager.dispatch.refresh_flush(
            state, uowtransaction, load_evt_attrs)

    # postfetched columns were generated server-side without RETURNING;
    # expire them so they load on next access
    if postfetch_cols:
        state._expire_attributes(state.dict,
                                 [mapper._columntoproperty[c].key
                                  for c in postfetch_cols if c in
                                  mapper._columntoproperty]
                                 )

    # synchronize newly inserted ids from one table to the next
    # TODO: this still goes a little too often. would be nice to
    # have definitive list of "columns that changed" here
    for m, equated_pairs in mapper._table_to_equated[table]:
        sync.populate(state, m, state, m,
                      equated_pairs,
                      uowtransaction,
                      mapper.passive_updates)
  880. def _postfetch_bulk_save(mapper, dict_, table):
  881. for m, equated_pairs in mapper._table_to_equated[table]:
  882. sync.bulk_populate_inherit_keys(dict_, m, equated_pairs)
  883. def _connections_for_states(base_mapper, uowtransaction, states):
  884. """Return an iterator of (state, state.dict, mapper, connection).
  885. The states are sorted according to _sort_states, then paired
  886. with the connection they should be using for the given
  887. unit of work transaction.
  888. """
  889. # if session has a connection callable,
  890. # organize individual states with the connection
  891. # to use for update
  892. if uowtransaction.session.connection_callable:
  893. connection_callable = \
  894. uowtransaction.session.connection_callable
  895. else:
  896. connection = uowtransaction.transaction.connection(base_mapper)
  897. connection_callable = None
  898. for state in _sort_states(states):
  899. if connection_callable:
  900. connection = connection_callable(base_mapper, state.obj())
  901. mapper = state.manager.mapper
  902. yield state, state.dict, mapper, connection
  903. def _cached_connection_dict(base_mapper):
  904. # dictionary of connection->connection_with_cache_options.
  905. return util.PopulateDict(
  906. lambda conn: conn.execution_options(
  907. compiled_cache=base_mapper._compiled_cache
  908. ))
  909. def _sort_states(states):
  910. pending = set(states)
  911. persistent = set(s for s in pending if s.key is not None)
  912. pending.difference_update(persistent)
  913. return sorted(pending, key=operator.attrgetter("insert_order")) + \
  914. sorted(persistent, key=lambda q: q.key[1])
class BulkUD(object):
    """Handle bulk update and deletes via a :class:`.Query`.

    Subclasses supply ``_do_exec`` for the actual statement plus the
    pre/post synchronization hooks that keep in-session objects
    consistent with the statement's effect.
    """

    def __init__(self, query):
        self.query = query.enable_eagerloads(False)
        self.mapper = self.query._bind_mapper()
        self._validate_query_state()

    def _validate_query_state(self):
        # bulk UPDATE/DELETE cannot honor result-shaping query
        # modifiers; reject queries on which any have been called
        for attr, methname, notset, op in (
            ('_limit', 'limit()', None, operator.is_),
            ('_offset', 'offset()', None, operator.is_),
            ('_order_by', 'order_by()', False, operator.is_),
            ('_group_by', 'group_by()', False, operator.is_),
            ('_distinct', 'distinct()', False, operator.is_),
            (
                '_from_obj',
                'join(), outerjoin(), select_from(), or from_self()',
                (), operator.eq)
        ):
            if not op(getattr(self.query, attr), notset):
                raise sa_exc.InvalidRequestError(
                    "Can't call Query.update() or Query.delete() "
                    "when %s has been called" %
                    (methname, )
                )

    @property
    def session(self):
        # the Session this bulk operation runs within
        return self.query.session

    @classmethod
    def _factory(cls, lookup, synchronize_session, *arg):
        # resolve the synchronize_session strategy value to a concrete
        # BulkUD subclass, raising for unknown strategies
        try:
            klass = lookup[synchronize_session]
        except KeyError:
            raise sa_exc.ArgumentError(
                "Valid strategies for session synchronization "
                "are %s" % (", ".join(sorted(repr(x)
                                             for x in lookup))))
        else:
            return klass(*arg)

    def exec_(self):
        # template method: run the full bulk operation lifecycle
        self._do_pre()
        self._do_pre_synchronize()
        self._do_exec()
        self._do_post_synchronize()
        self._do_post()

    @util.dependencies("sqlalchemy.orm.query")
    def _do_pre(self, querylib):
        query = self.query
        self.context = querylib.QueryContext(query)

        if isinstance(query._entities[0], querylib._ColumnEntity):
            # check for special case of query(table)
            tables = set()
            for ent in query._entities:
                if not isinstance(ent, querylib._ColumnEntity):
                    tables.clear()
                    break
                else:
                    tables.update(_from_objects(ent.column))

            if len(tables) != 1:
                raise sa_exc.InvalidRequestError(
                    "This operation requires only one Table or "
                    "entity be specified as the target."
                )
            else:
                self.primary_table = tables.pop()

        else:
            # single mapped entity; target its local table
            self.primary_table = query._only_entity_zero(
                "This operation requires only one Table or "
                "entity be specified as the target."
            ).mapper.local_table

        session = query.session

        if query._autoflush:
            session._autoflush()

    def _do_pre_synchronize(self):
        # hook: overridden by the 'evaluate' / 'fetch' strategies
        pass

    def _do_post_synchronize(self):
        # hook: overridden by the 'evaluate' / 'fetch' strategies
        pass
class BulkEvaluate(BulkUD):
    """BulkUD which does the 'evaluate' method of session state resolution."""

    def _additional_evaluators(self, evaluator_compiler):
        # hook: subclasses may compile additional evaluators
        # (e.g. for the SET values of an UPDATE)
        pass

    def _do_pre_synchronize(self):
        query = self.query
        target_cls = query._mapper_zero().class_

        try:
            # compile the WHERE criteria into a Python-evaluable
            # predicate; criteria that can't be evaluated in Python
            # raises UnevaluatableError, handled below
            evaluator_compiler = evaluator.EvaluatorCompiler(target_cls)
            if query.whereclause is not None:
                eval_condition = evaluator_compiler.process(
                    query.whereclause)
            else:
                # no criteria: every instance of the target class matches
                def eval_condition(obj):
                    return True

            self._additional_evaluators(evaluator_compiler)

        except evaluator.UnevaluatableError:
            raise sa_exc.InvalidRequestError(
                "Could not evaluate current criteria in Python. "
                "Specify 'fetch' or False for the "
                "synchronize_session parameter.")

        # TODO: detect when the where clause is a trivial primary key match
        self.matched_objects = [
            obj for (cls, pk), obj in
            query.session.identity_map.items()
            if issubclass(cls, target_cls) and
            eval_condition(obj)]
  1018. class BulkFetch(BulkUD):
  1019. """BulkUD which does the 'fetch' method of session state resolution."""
  1020. def _do_pre_synchronize(self):
  1021. query = self.query
  1022. session = query.session
  1023. context = query._compile_context()
  1024. select_stmt = context.statement.with_only_columns(
  1025. self.primary_table.primary_key)
  1026. self.matched_rows = session.execute(
  1027. select_stmt,
  1028. mapper=self.mapper,
  1029. params=query._params).fetchall()
class BulkUpdate(BulkUD):
    """BulkUD which handles UPDATEs."""

    def __init__(self, query, values, update_kwargs):
        super(BulkUpdate, self).__init__(query)
        self.values = values
        self.update_kwargs = update_kwargs

    @classmethod
    def factory(cls, query, synchronize_session, values, update_kwargs):
        # choose the BulkUpdate variant matching the requested
        # synchronize_session strategy
        return BulkUD._factory({
            "evaluate": BulkUpdateEvaluate,
            "fetch": BulkUpdateFetch,
            False: BulkUpdate
        }, synchronize_session, query, values, update_kwargs)

    def _resolve_string_to_expr(self, key):
        # translate a string key from the values dict into the mapped
        # column expression; non-string keys pass through unchanged
        if self.mapper and isinstance(key, util.string_types):
            attr = _entity_descriptor(self.mapper, key)
            return attr.__clause_element__()
        else:
            return key

    def _resolve_key_to_attrname(self, key):
        # translate a values key (string, instrumented attribute, or
        # column expression) into a mapped attribute name; returns
        # None for unmapped columns, raises for unsupported key types
        if self.mapper and isinstance(key, util.string_types):
            attr = _entity_descriptor(self.mapper, key)
            return attr.property.key
        elif isinstance(key, attributes.InstrumentedAttribute):
            return key.key
        elif hasattr(key, '__clause_element__'):
            # unwrap, then fall through to the ColumnElement check
            key = key.__clause_element__()

        if self.mapper and isinstance(key, expression.ColumnElement):
            try:
                attr = self.mapper._columntoproperty[key]
            except orm_exc.UnmappedColumnError:
                return None
            else:
                return attr.key
        else:
            raise sa_exc.InvalidRequestError(
                "Invalid expression type: %r" % key)

    def _do_exec(self):
        # normalize self.values (mapping or sequence of pairs) into
        # (expression, value) pairs
        values = [
            (self._resolve_string_to_expr(k), v)
            for k, v in (
                self.values.items() if hasattr(self.values, 'items')
                else self.values)
        ]
        # collapse to a dict unless ordered parameters were requested
        if not self.update_kwargs.get('preserve_parameter_order', False):
            values = dict(values)

        update_stmt = sql.update(self.primary_table,
                                 self.context.whereclause, values,
                                 **self.update_kwargs)

        self.result = self.query.session.execute(
            update_stmt, params=self.query._params,
            mapper=self.mapper)
        self.rowcount = self.result.rowcount

    def _do_post(self):
        session = self.query.session
        session.dispatch.after_bulk_update(self)
  1086. class BulkDelete(BulkUD):
  1087. """BulkUD which handles DELETEs."""
  1088. def __init__(self, query):
  1089. super(BulkDelete, self).__init__(query)
  1090. @classmethod
  1091. def factory(cls, query, synchronize_session):
  1092. return BulkUD._factory({
  1093. "evaluate": BulkDeleteEvaluate,
  1094. "fetch": BulkDeleteFetch,
  1095. False: BulkDelete
  1096. }, synchronize_session, query)
  1097. def _do_exec(self):
  1098. delete_stmt = sql.delete(self.primary_table,
  1099. self.context.whereclause)
  1100. self.result = self.query.session.execute(
  1101. delete_stmt,
  1102. params=self.query._params,
  1103. mapper=self.mapper)
  1104. self.rowcount = self.result.rowcount
  1105. def _do_post(self):
  1106. session = self.query.session
  1107. session.dispatch.after_bulk_delete(self)
class BulkUpdateEvaluate(BulkEvaluate, BulkUpdate):
    """BulkUD which handles UPDATEs using the "evaluate"
    method of session resolution."""

    def _additional_evaluators(self, evaluator_compiler):
        # compile one evaluator per SET value so the new values can be
        # applied to in-session objects in Python
        self.value_evaluators = {}
        values = (self.values.items() if hasattr(self.values, 'items')
                  else self.values)
        for key, value in values:
            key = self._resolve_key_to_attrname(key)
            if key is not None:
                self.value_evaluators[key] = evaluator_compiler.process(
                    expression._literal_as_binds(value))

    def _do_post_synchronize(self):
        session = self.query.session
        states = set()
        evaluated_keys = list(self.value_evaluators.keys())
        for obj in self.matched_objects:
            state, dict_ = attributes.instance_state(obj),\
                attributes.instance_dict(obj)

            # only evaluate unmodified attributes
            to_evaluate = state.unmodified.intersection(
                evaluated_keys)
            for key in to_evaluate:
                dict_[key] = self.value_evaluators[key](obj)

            state._commit(dict_, list(to_evaluate))

            # expire attributes with pending changes
            # (there was no autoflush, so they are overwritten)
            state._expire_attributes(dict_,
                                     set(evaluated_keys).
                                     difference(to_evaluate))
            states.add(state)
        session._register_altered(states)
  1140. class BulkDeleteEvaluate(BulkEvaluate, BulkDelete):
  1141. """BulkUD which handles DELETEs using the "evaluate"
  1142. method of session resolution."""
  1143. def _do_post_synchronize(self):
  1144. self.query.session._remove_newly_deleted(
  1145. [attributes.instance_state(obj)
  1146. for obj in self.matched_objects])
  1147. class BulkUpdateFetch(BulkFetch, BulkUpdate):
  1148. """BulkUD which handles UPDATEs using the "fetch"
  1149. method of session resolution."""
  1150. def _do_post_synchronize(self):
  1151. session = self.query.session
  1152. target_mapper = self.query._mapper_zero()
  1153. states = set([
  1154. attributes.instance_state(session.identity_map[identity_key])
  1155. for identity_key in [
  1156. target_mapper.identity_key_from_primary_key(
  1157. list(primary_key))
  1158. for primary_key in self.matched_rows
  1159. ]
  1160. if identity_key in session.identity_map
  1161. ])
  1162. attrib = [_attr_as_key(k) for k in self.values]
  1163. for state in states:
  1164. session._expire_state(state, attrib)
  1165. session._register_altered(states)
  1166. class BulkDeleteFetch(BulkFetch, BulkDelete):
  1167. """BulkUD which handles DELETEs using the "fetch"
  1168. method of session resolution."""
  1169. def _do_post_synchronize(self):
  1170. session = self.query.session
  1171. target_mapper = self.query._mapper_zero()
  1172. for primary_key in self.matched_rows:
  1173. # TODO: inline this and call remove_newly_deleted
  1174. # once
  1175. identity_key = target_mapper.identity_key_from_primary_key(
  1176. list(primary_key))
  1177. if identity_key in session.identity_map:
  1178. session._remove_newly_deleted(
  1179. [attributes.instance_state(
  1180. session.identity_map[identity_key]
  1181. )]
  1182. )