4545#include "utils/builtins.h"
4646#include "utils/rel.h"
4747
48- static TM_Result heapam_tuple_lock_internal (Relation relation , ItemPointer tid ,
49- Snapshot snapshot , TupleTableSlot * slot ,
50- CommandId cid , LockTupleMode mode ,
51- LockWaitPolicy wait_policy , uint8 flags ,
52- TM_FailureData * tmfd , bool updated );
53-
5448static void reform_and_rewrite_tuple (HeapTuple tuple ,
5549 Relation OldHeap , Relation NewHeap ,
5650 Datum * values , bool * isnull , RewriteState rwstate );
@@ -305,55 +299,22 @@ heapam_tuple_complete_speculative(Relation relation, TupleTableSlot *slot,
 static TM_Result
 heapam_tuple_delete(Relation relation, ItemPointer tid, CommandId cid,
 					Snapshot snapshot, Snapshot crosscheck, bool wait,
-					TM_FailureData *tmfd, bool changingPart,
-					LazyTupleTableSlot *lockedSlot)
+					TM_FailureData *tmfd, bool changingPart)
 {
-	TM_Result	result;
-
 	/*
 	 * Currently Deleting of index tuples are handled at vacuum, in case if
 	 * the storage itself is cleaning the dead tuples by itself, it is the
 	 * time to call the index tuple deletion also.
 	 */
-	result = heap_delete(relation, tid, cid, crosscheck, wait,
-						 tmfd, changingPart);
-
-	/*
-	 * If the tuple has been concurrently updated, then get the lock on it.
-	 * (Do this if caller asked for that by providing a 'lockedSlot'.) With
-	 * the lock held, a retry of the delete should succeed even if there are
-	 * more concurrent update attempts.
-	 */
-	if (result == TM_Updated && lockedSlot)
-	{
-		TupleTableSlot *evalSlot;
-
-		Assert(wait);
-
-		evalSlot = LAZY_TTS_EVAL(lockedSlot);
-		result = heapam_tuple_lock_internal(relation, tid, snapshot,
-											evalSlot, cid, LockTupleExclusive,
-											LockWaitBlock,
-											TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
-											tmfd, true);
-
-		if (result == TM_Ok)
-		{
-			tmfd->traversed = true;
-			return TM_Updated;
-		}
-	}
-
-	return result;
+	return heap_delete(relation, tid, cid, crosscheck, wait, tmfd, changingPart);
 }
 
 
 static TM_Result
 heapam_tuple_update(Relation relation, ItemPointer otid, TupleTableSlot *slot,
 					CommandId cid, Snapshot snapshot, Snapshot crosscheck,
 					bool wait, TM_FailureData *tmfd,
-					LockTupleMode *lockmode, TU_UpdateIndexes *update_indexes,
-					LazyTupleTableSlot *lockedSlot)
+					LockTupleMode *lockmode, TU_UpdateIndexes *update_indexes)
 {
 	bool		shouldFree = true;
 	HeapTuple	tuple = ExecFetchSlotHeapTuple(slot, true, &shouldFree);
@@ -391,32 +352,6 @@ heapam_tuple_update(Relation relation, ItemPointer otid, TupleTableSlot *slot,
 	if (shouldFree)
 		pfree(tuple);
 
-	/*
-	 * If the tuple has been concurrently updated, then get the lock on it.
-	 * (Do this if caller asked for that by providing a 'lockedSlot'.) With
-	 * the lock held, a retry of the update should succeed even if there are
-	 * more concurrent update attempts.
-	 */
-	if (result == TM_Updated && lockedSlot)
-	{
-		TupleTableSlot *evalSlot;
-
-		Assert(wait);
-
-		evalSlot = LAZY_TTS_EVAL(lockedSlot);
-		result = heapam_tuple_lock_internal(relation, otid, snapshot,
-											evalSlot, cid, *lockmode,
-											LockWaitBlock,
-											TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
-											tmfd, true);
-
-		if (result == TM_Ok)
-		{
-			tmfd->traversed = true;
-			return TM_Updated;
-		}
-	}
-
 	return result;
 }
 
@@ -425,26 +360,10 @@ heapam_tuple_lock(Relation relation, ItemPointer tid, Snapshot snapshot,
 				  TupleTableSlot *slot, CommandId cid, LockTupleMode mode,
 				  LockWaitPolicy wait_policy, uint8 flags,
 				  TM_FailureData *tmfd)
-{
-	return heapam_tuple_lock_internal(relation, tid, snapshot, slot, cid,
-									  mode, wait_policy, flags, tmfd, false);
-}
-
-/*
- * This routine does the work for heapam_tuple_lock(), but also supports an
- * `updated` argument to re-use the work done by heapam_tuple_update() or
- * heapam_tuple_delete() on figuring out that the tuple was concurrently
- * updated.
- */
-static TM_Result
-heapam_tuple_lock_internal(Relation relation, ItemPointer tid,
-						   Snapshot snapshot, TupleTableSlot *slot,
-						   CommandId cid, LockTupleMode mode,
-						   LockWaitPolicy wait_policy, uint8 flags,
-						   TM_FailureData *tmfd, bool updated)
 {
 	BufferHeapTupleTableSlot *bslot = (BufferHeapTupleTableSlot *) slot;
 	TM_Result	result;
-	Buffer		buffer = InvalidBuffer;
+	Buffer		buffer;
 	HeapTuple	tuple = &bslot->base.tupdata;
 	bool		follow_updates;
 
@@ -455,26 +374,16 @@ heapam_tuple_lock_internal(Relation relation, ItemPointer tid,
 
 tuple_lock_retry:
 	tuple->t_self = *tid;
-	if (!updated)
-		result = heap_lock_tuple(relation, tuple, cid, mode, wait_policy,
-								 follow_updates, &buffer, tmfd);
-	else
-		result = TM_Updated;
+	result = heap_lock_tuple(relation, tuple, cid, mode, wait_policy,
+							 follow_updates, &buffer, tmfd);
 
 	if (result == TM_Updated &&
 		(flags & TUPLE_LOCK_FLAG_FIND_LAST_VERSION))
 	{
-		if (!updated)
-		{
-			/* Should not encounter speculative tuple on recheck */
-			Assert(!HeapTupleHeaderIsSpeculative(tuple->t_data));
+		/* Should not encounter speculative tuple on recheck */
+		Assert(!HeapTupleHeaderIsSpeculative(tuple->t_data));
 
-			ReleaseBuffer(buffer);
-		}
-		else
-		{
-			updated = false;
-		}
+		ReleaseBuffer(buffer);
 
 		if (!ItemPointerEquals(&tmfd->ctid, &tuple->t_self))
 		{
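
Context for the revert: with heapam_tuple_lock_internal() and the 'lockedSlot'
arguments gone, chasing and locking the latest version of a concurrently
updated row is once again the caller's job. The sketch below illustrates that
caller-side pattern under stated assumptions: the helper name
delete_with_concurrency_retry() and its parameters are hypothetical, error and
wait-policy handling is omitted, and the authoritative logic lives in
ExecDelete() in src/backend/executor/nodeModifyTable.c.

#include "postgres.h"
#include "access/tableam.h"

/* Hypothetical caller-side retry loop (illustrative sketch only). */
static TM_Result
delete_with_concurrency_retry(Relation rel, ItemPointer tid, CommandId cid,
							  Snapshot snapshot, TupleTableSlot *slot)
{
	TM_FailureData tmfd;
	TM_Result	result;

retry:
	result = table_tuple_delete(rel, tid, cid, snapshot,
								InvalidSnapshot,	/* no crosscheck */
								true,	/* wait for concurrent updaters */
								&tmfd,
								false); /* not changing partitions */

	if (result == TM_Updated)
	{
		/*
		 * The row was concurrently updated.  Lock its latest version; with
		 * TUPLE_LOCK_FLAG_FIND_LAST_VERSION the AM follows the update chain
		 * and stores the locked version in *slot.
		 */
		result = table_tuple_lock(rel, tid, snapshot, slot, cid,
								  LockTupleExclusive, LockWaitBlock,
								  TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
								  &tmfd);
		if (result == TM_Ok && tmfd.traversed)
		{
			/*
			 * Point tid at the locked version and retry.  The executor
			 * additionally re-checks quals via EvalPlanQual() at this point.
			 */
			ItemPointerCopy(&slot->tts_tid, tid);
			goto retry;
		}
	}
	return result;
}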