"""Utilities for managing state of v1 control flow for computing gradients."""

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util as util
from tensorflow.python.ops import control_flow_v2_func_graphs
from tensorflow.python.ops import default_gradient
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import resource_variable_ops


def _GetMaxSizeFromNestedMaximumIterations(value, while_ctxt):
  """Calculate a max_size for use by stack ops inside an XLA while_loop.

  Args:
    value: The value inside the while_loop forward context.  Used for printing
      error messages.
    while_ctxt: The forward context inside which value resides.  This does not
      always match the value's immediate context, as `value` may be inside e.g.
      a cond context inside the while_loop.

  Returns:
    A tensor containing the `max_size` to feed to a Stack initializer.

  Raises:
    ValueError: If `value` is nested inside a `while_loop` that either
      lacks a `maximum_iterations` parameter, or the `maximum_iterations`
      parameter:

        - is inside a `while_loop` that is a parent of the calling context, and
        - cannot be evaluated at graph build time to a constant.
  """
  value_name = value.name
  # curr_ctxt is the context that tf.gradients was called in.
  curr_ctxt = ops.get_default_graph()._get_control_flow_context()

  curr_ctxt_name = curr_ctxt.name if curr_ctxt is not None else ""
  max_size = constant_op.constant(1)

  # Loop through all containing while contexts between value and the
  # current context, multiplying together each context's
  # max_iterations to get the maximum stack size.
  while while_ctxt not in (None, curr_ctxt):
    max_iter = while_ctxt.maximum_iterations
    if max_iter is None:
      raise ValueError(
          "Cannot create a gradient accumulator for tensor '%s' inside XLA "
          "while_loop because maximum_iterations was not passed to the "
          "tf.while_loop call ('%s')." % (value_name, while_ctxt.name))

    max_iter_ctxt = max_iter.op._get_control_flow_context()

    # If max_iter_ctxt (non-strictly) contains curr_ctxt, then it's OK to use.
    if util.IsContainingContext(curr_ctxt, max_iter_ctxt):
      max_size *= max_iter
    else:
      # We cannot use max_iter because it's defined in a nested while_loop
      # context. We are computing a gradient inside an XLA context, so the
      # stack size must be statically known; fall back to the constant value
      # of max_iter, if it is known.
      const_max_iter = tensor_util.constant_value(max_iter)
      if const_max_iter is None:
        raise ValueError(
            "Cannot create a gradient accumulator for tensor '%s' inside XLA "
            "while_loop. maximum_iterations tensor '%s' for while_loop "
            "context '%s' must be statically known (e.g. a constant value or "
            "known shape dimension), or be defined at or outside the while "
            "loop context '%s' (currently defined in '%s')." %
            (value_name, max_iter.name, while_ctxt.name, curr_ctxt_name,
             max_iter_ctxt.name))
      max_size *= const_max_iter

    # Find the next outer WhileContext (or stop if we reach the
    # context that tf.gradients() was called in).
    while_ctxt = util.GetContainingWhileContext(
        while_ctxt.outer_context, stop_ctxt=curr_ctxt)

  return max_size


class _GradLoopState(object):
  """The state used for constructing the gradient graph for a while loop.

  We create a _GradLoopState for each while loop in forward and its
  corresponding while loop in backprop. This gives us access to both
  the forward and the backprop WhileContexts.

  During the construction of gradient graph, any time when we detect
  a forward value that is needed for backprop, we create a history
  accumulator and add it to `history_map`. Any time when we backprop
  a loop switch op (in _SwitchGrad), we add the grad merge op in
  `switch_map`.
  """

  def __init__(self, forward_ctxt, outer_grad_state):
    self._outer_grad_state = None
    self._forward_context = None
    self._forward_index = None
    self._forward_sync = None
    self._grad_context = None
    self._grad_index = None
    self._grad_sync = None
    self._history_map = {}
    self._switch_map = {}
    self._unused_exits = []
    self._deferred_exits = []
    self._forward_loop_exits = list(forward_ctxt.loop_exits)
    self._pending_exits_count = len(forward_ctxt.loop_exits)

    self._outer_grad_state = outer_grad_state
    if outer_grad_state:
      outer_forward_ctxt = outer_grad_state.forward_context
    else:
      if not hasattr(forward_ctxt, "outer_context"):
        raise ValueError("Failed to call gradients on a while loop without "
                         "properly serializing graph via MetaGraphDef")
      outer_forward_ctxt = forward_ctxt.outer_context

    # Add the forward loop counter.
    with forward_ctxt._graph.as_default():
      if outer_forward_ctxt:
        outer_forward_ctxt.Enter()
      cnt, forward_index = forward_ctxt.AddForwardLoopCounter(outer_grad_state)
      if outer_forward_ctxt:
        outer_forward_ctxt.Exit()
    self._forward_context = forward_ctxt
    self._forward_index = forward_index

    # Add the backprop WhileContext, and the backprop loop counter.
    if outer_grad_state:
      # This is a nested loop. Remember the iteration count for each
      # execution of this inner loop.
      outer_forward_ctxt.AddName(cnt.name)
      history_cnt = outer_grad_state.AddForwardAccumulator(cnt)

      outer_grad_ctxt = outer_grad_state.grad_context
      outer_grad_ctxt.Enter()
      self._grad_context = control_flow_ops.WhileContext(
          maximum_iterations=forward_ctxt.maximum_iterations,
          parallel_iterations=forward_ctxt.parallel_iterations,
          back_prop=forward_ctxt.back_prop,
          swap_memory=forward_ctxt.swap_memory,
          name=forward_ctxt.name,
          grad_state=self)
      real_cnt = outer_grad_state.AddBackpropAccumulatedValue(history_cnt, cnt)
      self._grad_index = self._grad_context.AddBackpropLoopCounter(
          real_cnt, outer_grad_state)
      outer_grad_ctxt.Exit()
    else:
      if outer_forward_ctxt:
        outer_forward_ctxt.Enter()
      self._grad_context = control_flow_ops.WhileContext(
          maximum_iterations=forward_ctxt.maximum_iterations,
          parallel_iterations=forward_ctxt.parallel_iterations,
          back_prop=forward_ctxt.back_prop,
          swap_memory=forward_ctxt.swap_memory,
          name=forward_ctxt.name,
          grad_state=self)
      self._grad_index = self._grad_context.AddBackpropLoopCounter(
          cnt, outer_grad_state)
      if outer_forward_ctxt:
        outer_forward_ctxt.Exit()

  @property
  def outer_grad_state(self):
    """The grad loop state for outer loop."""
    return self._outer_grad_state

  @property
  def forward_context(self):
    """The while loop context for forward."""
    return self._forward_context

  @property
  def forward_index(self):
    """The loop index of forward loop."""
    return self._forward_index

  @property
  def forward_sync(self):
    """A control trigger node for synchronization in the forward loop.

    One main use is to keep the push ops of a stack executed in the
    iteration order.
    """
    if self._forward_sync is None:
      with ops.control_dependencies(None):
        self._forward_sync = control_flow_ops.control_trigger(name="f_sync")
      self._forward_sync._set_control_flow_context(self._forward_context)
      self._forward_index.op._add_control_input(self._forward_sync)
    return self._forward_sync

  @property
  def grad_context(self):
    """The corresponding WhileContext for gradient."""
    return self._grad_context

  @property
  def grad_index(self):
    """The loop index of backprop loop."""
    return self._grad_index

  @property
  def grad_sync(self):
    """A control trigger node for synchronization in the grad loop.

    One main use is to keep the pop ops of a stack executed in the
    iteration order.
    """
    if self._grad_sync is None:
      with ops.control_dependencies(None):
        self._grad_sync = control_flow_ops.control_trigger(name="b_sync")
      self._grad_sync._set_control_flow_context(self._grad_context)
      self._grad_index.op._add_control_input(self._grad_sync)
      if self._grad_context.outer_context:
        self._grad_context.outer_context.AddInnerOp(self._grad_sync)
    return self._grad_sync

  @property
  def history_map(self):
    """The map that records all the tensors needed for backprop."""
    return self._history_map

  @property
  def switch_map(self):
    """The map that records all the Switch ops for the while loop."""
    return self._switch_map

  @property
  def unused_exits(self):
    """The list of "unused" exits."""
    return self._unused_exits

  @property
  def deferred_exits(self):
    """The list of "deferred" exits."""
    return self._deferred_exits

  @property
  def forward_loop_exits(self):
    """The list of exits of the forward loop."""
    return self._forward_loop_exits

  @property
  def pending_exits_count(self):
    """The number of exits we expect to see but haven't."""
    return self._pending_exits_count

  @pending_exits_count.setter
  def pending_exits_count(self, cnt):
    """Set the pending count to cnt."""
    self._pending_exits_count = cnt
  def AddForwardAccumulator(self, value, dead_branch=False):
    """Add an accumulator for each forward tensor that is needed in backprop.

    This is added to the forward loop at the first time when a tensor
    in the forward loop is used by backprop gradient computation loop.
    We create an accumulator that accumulates the value of tensor at each
    iteration. Called in the control flow context where gradients() is called.

    The pseudocode is:
    ```
      acc = stack();
      while (_pivot) {
        acc = stack_push(acc, value);
      }
    ```

    We make sure that the stack push op in one iteration is executed before
    next iteration. This is achieved by adding a control edge from
    `forward_index.op.inputs[0].op` to the push op, and another control
    edge from the push op to either `forward_index.op` or `forward_sync`.

    Args:
      value: The source tensor in forward that is to be accumulated.
      dead_branch: True iff the tensor is on a dead branch of a cond.

    Returns:
      The stack that contains the accumulated history of the tensor.

    Raises:
      TypeError: For internal errors involving the value condition context.
      ValueError: If `value` is inside a XLA scope and a valid max size
        for the stack can't be found.
    """
    with self._forward_index.graph.as_default():
      # curr_ctxt is the context that tf.gradients was called in.
      curr_ctxt = ops.get_default_graph()._get_control_flow_context()
      with ops.control_dependencies(None):
        if curr_ctxt:
          curr_ctxt.Enter()
        with ops.colocate_with(value):
          # We only need to pass maximum_iterations to the stack if
          # we're inside an XLA context.
          if not util.IsInXLAContext(value.op):
            max_size = constant_op.constant(-1, dtypes.int32)
          else:
            max_size = _GetMaxSizeFromNestedMaximumIterations(
                value, self.forward_context)
          acc = gen_data_flow_ops.stack_v2(
              max_size=max_size, elem_type=value.dtype.base_dtype,
              name="f_acc")
        if curr_ctxt:
          curr_ctxt.Exit()

        # Make acc available in the forward context.
        enter_acc = self.forward_context.AddValue(acc)

        # Add the stack_push op in the context of value.op.
        swap_enabled = self.forward_context.swap_memory
        value_ctxt = util.GetOutputContext(value.op)
        if value_ctxt == self.forward_context:
          # value is not nested in the forward context.
          self.forward_context.Enter()
          push = gen_data_flow_ops.stack_push_v2(
              enter_acc, value, swap_memory=swap_enabled)
          self.forward_context.Exit()
          # Protect stack push and order it before forward_index.
          self.forward_index.op._add_control_input(push.op)
        else:
          # value is in a cond context within the forward context.
          if not isinstance(value_ctxt, control_flow_ops.CondContext):
            raise TypeError("value_ctxt is not a CondContext: %s" % value_ctxt)
          if dead_branch:
            # The special case for creating a zero tensor for a dead
            # branch of a switch. See _ControlFlowState.ZerosLikeV1WhileLoop().
            value_ctxt.outer_context.Enter()
            push = gen_data_flow_ops.stack_push_v2(
                enter_acc, value, swap_memory=swap_enabled)
            value_ctxt.outer_context.Exit()
            push.op._set_control_flow_context(value_ctxt)
          else:
            value_ctxt.Enter()
            push = gen_data_flow_ops.stack_push_v2(
                enter_acc, value, swap_memory=swap_enabled)
            value_ctxt.Exit()
          # Protect stack push and order it before forward_sync.
          self.forward_sync._add_control_input(push.op)
        # Order the stack push after the successor of forward_index.
        add_op = self.forward_index.op.inputs[0].op
        push.op._add_control_input(add_op)
        return acc

  def AddBackpropAccumulatedValue(self, history_value, value,
                                  dead_branch=False):
    """Add the getter for an accumulated value in the grad context.

    This is added to the backprop loop. Called in the grad context to
    get the value of an accumulated value. The stack pop op must be guarded
    by the pred of the controlling cond.

    Args:
      history_value: The history (a stack) of a value.
      value: The value that is pushed onto the stack.
      dead_branch: True iff the tensor is on a dead branch of a cond.

    Returns:
      The current value (the top of the stack).
    """
    history_ctxt = history_value.op._get_control_flow_context()
    # Find the cond context that controls history_value, if any.
    cond_ctxt = None
    value_ctxt = value.op._get_control_flow_context()
    while value_ctxt and value_ctxt != history_ctxt:
      if isinstance(value_ctxt, control_flow_ops.CondContext):
        cond_ctxt = value_ctxt
        break
      value_ctxt = value_ctxt.outer_context
    with ops.control_dependencies(None):
      self.grad_context.Enter()
      if cond_ctxt:
        # Guard the stack pop with a switch if it is controlled by a cond.
        grad_state = self
        pred = None
        while pred is None and grad_state:
          pred = grad_state.history_map.get(cond_ctxt.pred.name)
          grad_state = grad_state.outer_grad_state
        if pred is None:
          pred = cond_ctxt.pred
        branch = (1 - cond_ctxt.branch) if dead_branch else cond_ctxt.branch
        history_value = control_flow_ops._SwitchRefOrTensor(
            history_value, pred)[branch]
      pop = gen_data_flow_ops.stack_pop_v2(history_value,
                                           value.dtype.base_dtype)
      pop.set_shape(value.get_shape())
      self.grad_context.Exit()
    parallel_iterations = self.grad_context.parallel_iterations
    if parallel_iterations > 1:
      # All pops are ordered after pivot_for_body and before grad_sync.
      self.grad_sync._add_control_input(pop.op)
    return pop

  def GetRealValue(self, value):
    """Get the real value of `value`.

    If backprop "uses" a value produced by forward inference, an accumulator
    is added in the forward loop to accumulate its values.  We use the
    accumulated value. This method must be called in the grad loop context.
    `value` must be in forward and needed for backprop.

    Args:
      value: A tensor to be captured.

    Returns:
      The same tensor obtained from the saved history.
    """
    assert value.op.type not in ["Variable", "VariableV2"]
    real_value = self._history_map.get(value.name)
    if real_value is None:
      cur_value = value
      cur_grad_state = self
      while True:
        enter_op = util.GetLoopConstantEnter(cur_value)
        if enter_op:
          # Special case: cur_value comes from a constant Enter node.
          cur_value = enter_op.inputs[0]
          cur_grad_state = cur_grad_state.outer_grad_state
          if cur_grad_state is None:
            # We are now outside all nested loops for this gradient(),
            # so `value` is a loop invariant and there is no need to
            # save its history. Just make cur_value enter the right
            # control flow context.
            real_value = self._grad_context.AddValue(cur_value)
            break
        elif constant_op.is_constant(cur_value):
          # If the value to be forwarded is a constant, clone the constant
          # in the gradient loop rather than using a stack.
          real_value = constant_op.constant(
              tensor_util.constant_value(cur_value), dtype=cur_value.dtype)
          break
        else:
          # Record the history of this value in forward_ctxt.
          self._grad_context.Exit()
          history_value = cur_grad_state.AddForwardAccumulator(cur_value)
          self._grad_context.Enter()
          break

      if real_value is None:
        # Add the stack pop op in the grad context.
        real_value = cur_grad_state.AddBackpropAccumulatedValue(
            history_value, cur_value)
        if cur_grad_state != self:
          real_value = self._grad_context.AddValue(real_value)
      self._history_map[value.name] = real_value
    return real_value


class _ControlFlowState(object):
  """Maintain the mapping from the loops to their grad states."""

  def __init__(self):
    self._map = {}  # maps forward loop context to _GradLoopState

  def GetGradState(self, op, before):
    """Return the grad state for this op if it's in a forward loop context."""
    if before and util.IsLoopExit(op):
      forward_ctxt = op._get_control_flow_context()
      forward_ctxt = forward_ctxt.outer_context
      if forward_ctxt:
        forward_ctxt = forward_ctxt.GetWhileContext()
    else:
      forward_ctxt = util.GetWhileContext(op)
    if forward_ctxt:
      return self._map.get(forward_ctxt)
    return None

  def ProcessUnusedLoopExits(self, pending_count, to_ops_set):
    """Process all the "unused" loop exits.

    The "unused" exits of the loops are added to `unused_exits`. An exit is
    unused if its pending_count is 0. If there is an exit with real gradient,
    all these deferred exits will enter the backprop loop with zero gradient.
    Otherwise, they will enter the backprop loop with None. As an example,
    people often write:

    ```python
    v1, _ = tf.while_loop(p, b, [x1, x2])
    result = gradients(v1, x1)
    ```

    The exit node for x2 is not included by the betweenness analysis. But we
    need to backprop x2 if x2 is involved in computing v1.

    Args:
      pending_count: The number of backprop inputs for every op.
      to_ops_set: The set of ops for ys in gradients(ys, xs)

    Returns:
      The set of unused loop exits that we know at this point we need
      to backprop.
    """
    loop_exits = []
    for grad_state in self._map.values():
      for y in grad_state.forward_loop_exits:
        if pending_count[y.op] == 0:
          grad_state.pending_exits_count -= 1
          if y.op not in to_ops_set:
            grad_state.unused_exits.append(y)
          if grad_state.pending_exits_count == 0:
            loop_exits.extend(grad_state.unused_exits)
      # Need to include Enters in backprop for higher-order gradients.
      for y in grad_state.forward_context.loop_enters:
        if pending_count[y.op] == 0:
          pending_count[y.op] = 1
    return loop_exits

  def EnterGradWhileContext(self, op, before):
    """Enter the WhileContext for gradient computation."""
    grad_state = self.GetGradState(op, before)
    if grad_state:
      grad_state.grad_context.Enter()

  def ExitGradWhileContext(self, op, before):
    """Exit the WhileContext for gradient computation."""
    grad_state = self.GetGradState(op, before)
    if grad_state:
      grad_state.grad_context.Exit()

  def AddWhileContext(self, op, between_op_list, between_ops):
    """Add the grad state for the while loop that op belongs to.

    Note that op is an Exit, and this method must be called in
    the control flow context where gradients() is called.

    Note that this method modifies `between_op_list` and `between_ops`.
    """
    forward_ctxt = util.GetWhileContext(op)
    grad_state = self._map.get(forward_ctxt)
    if grad_state is None:
      # This is a new while loop, so create a grad state for it.
      outer_forward_ctxt = forward_ctxt.outer_context
      if outer_forward_ctxt:
        outer_forward_ctxt = outer_forward_ctxt.GetWhileContext()
      outer_grad_state = None
      if outer_forward_ctxt:
        outer_grad_state = self._map.get(outer_forward_ctxt)
      grad_state = _GradLoopState(forward_ctxt, outer_grad_state)
      self._map[forward_ctxt] = grad_state

      # We need to include all exits of a loop for backprop.
      for loop_exit in grad_state.forward_loop_exits:
        if loop_exit.op not in between_ops:
          between_ops.add(loop_exit.op)
          between_op_list.append(loop_exit.op)

  def ZerosLikeForExit(self, val):
    """Create zeros_like gradient for a loop exit.

    If the result of a loop variable is not used but is involved in
    computing the result of some needed loop variable, we create a
    zero-valued tensor that is fed as gradient for the Exit node of that
    loop variable. Note that val.op is an Exit, and this method must be
    called in the control flow context where gradients() is called.

    Args:
      val: The output tensor of an Exit op.

    Returns:
      A zero tensor of the same shape of val.
    """
    val_shape = val.get_shape()
    forward_ctxt = val.op._get_control_flow_context()
    outer_forward_ctxt = forward_ctxt.outer_context
    if outer_forward_ctxt:
      outer_forward_ctxt = outer_forward_ctxt.GetWhileContext()
    outer_grad_state = None
    if outer_forward_ctxt:
      outer_grad_state = self._map.get(outer_forward_ctxt)
    if outer_grad_state:
      # This is a nested loop.
      if val_shape.is_fully_defined():
        # If the shape is known statically, just create a zero tensor
        # with the right shape in the right context.
        outer_grad_state.grad_context.Enter()
        result = array_ops.zeros(val_shape.dims, val.dtype)
        outer_grad_state.grad_context.Exit()
      else:
        # Only the shape of value is needed for backprop.
        forward_ctxt.outer_context.Enter()
        shape = array_ops.shape_internal(val, optimize=False)
        forward_ctxt.outer_context.Exit()
        # Save the shape to a stack.
        history_shape = outer_grad_state.AddForwardAccumulator(shape)
        # Get the shape back from the stack.
        outer_grad_ctxt = outer_grad_state.grad_context
        outer_grad_ctxt.Enter()
        real_shape = outer_grad_state.AddBackpropAccumulatedValue(
            history_shape, shape)
        result = array_ops.zeros(real_shape, val.dtype)
        outer_grad_ctxt.Exit()
    else:
      # This is not a nested loop.
      if val_shape.is_fully_defined():
        # If the shape is known statically, just create a zero tensor
        # with the right shape.
        result = array_ops.zeros(val_shape.dims, val.dtype)
      else:
        result = array_ops.zeros_like(val, optimize=False)
    return result

  def ZerosLikeV1WhileLoop(self, op, index):
    """Create zeros_like for the specified output of an op.

    If op is in a while loop that is part of gradients(), this method
    must be called in its grad loop context.

    Args:
      op: A tensorflow operation.
      index: the index for a specific output of the op.

    Returns:
      A zero tensor of the same shape of op.outputs[index].
    """
    if util.IsLoopSwitch(op):
      return None
    if op.graph.building_function:
      # The optimization here is tricky to apply to functions.
      return array_ops.zeros_like(op.outputs[index])
    dead_branch = util.IsSwitch(op)
    forward_ctxt = util.GetWhileContext(op)
    grad_state = self._map.get(forward_ctxt)
    if grad_state is None:
      # op is not in a while loop that is part of gradients().
      return ZerosLike(op, index)
    op_ctxt = op._get_control_flow_context()
    val = ops.convert_to_tensor(op.outputs[index], name="tensor")
    shape = val.get_shape()
    if shape.is_fully_defined():
      # If the shape is known statically, just create a zero tensor with
      # the right shape in the grad loop context.
      if val.dtype == dtypes.resource:
        result = array_ops.zeros(
            resource_variable_ops.variable_shape(val),
            dtype=default_gradient.get_zeros_dtype(val))
      else:
        result = constant_op.constant(0, shape=shape.dims, dtype=val.dtype)
      if dead_branch:
        # op is a cond switch. Guard the zero tensor with a switch.
        pred = grad_state.history_map.get(op_ctxt.pred.name)
        branch = op_ctxt.branch
        result = control_flow_ops._SwitchRefOrTensor(result, pred)[1 - branch]
    else:
      # Unknown shape, so keep a history of the shape at runtime.
      if dead_branch:
        # Need to add a special switch to guard the value.
        pred = op_ctxt.pred
        branch = op_ctxt.branch
        op_ctxt.outer_context.Enter()
        val = control_flow_ops._SwitchRefOrTensor(op.inputs[0],
                                                  pred)[1 - branch]
        zeros_shape = array_ops.shape_internal(val, optimize=False)
        op_ctxt.outer_context.Exit()
        val.op._set_control_flow_context(op_ctxt)
        zeros_shape.op._set_control_flow_context(op_ctxt)
      else:
        op_ctxt.Enter()
        zeros_shape = array_ops.shape_internal(val, optimize=False)
        op_ctxt.Exit()

      # Add forward accumulator for shape.
      grad_state.grad_context.Exit()
      history_zeros_shape = grad_state.AddForwardAccumulator(
          zeros_shape, dead_branch=dead_branch)
      grad_state.grad_context.Enter()

      # Create a zero tensor with the right shape.
      shape = grad_state.AddBackpropAccumulatedValue(history_zeros_shape,
                                                     zeros_shape, dead_branch)
      result = array_ops.zeros(shape, val.dtype)
    return result

  def PostProcessing(self):
    """Perform postprocessing at the end of gradients().

    We have created the gradient graph at this point. So this function
    can be used to perform any postprocessing on the gradient graph.
    We currently perform the following postprocessing:
      1. Patch the gradient graph if the output of a loop variable
         doesn't depend on its input.
    """
    for _, grad_state in self._map.items():
      for _, b_merge in grad_state.switch_map.items():
        if b_merge.op.inputs[0] == b_merge.op.inputs[1]:
          # The value of this loop variable at iteration i+1 doesn't
          # depend on its value at iteration i, so use zeros as the
          # gradients for all iterations > 0.
          dtype = b_merge.op.inputs[0].dtype
          shape = b_merge.op.inputs[0].get_shape()
          if shape.is_fully_defined():
            grad_state.grad_context.Enter()
            # Create a zeros tensor and use it for iterations > 0.
            grad_val = constant_op.constant(0, dtype=dtype, shape=shape)
            next_grad_val = control_flow_ops._NextIteration(grad_val)
            grad_state.grad_context.Exit()
          else:
            # Create a zeros tensor in the outer grad context.
            outer_grad_ctxt = grad_state.grad_context.outer_context
            if outer_grad_ctxt:
              outer_grad_ctxt.Enter()
            enter_grad_op = b_merge.op.inputs[0].op
            enter_grad = enter_grad_op.inputs[0]
            grad_shape = array_ops.shape_internal(enter_grad, optimize=False)
            grad_val = array_ops.zeros(grad_shape)
            if outer_grad_ctxt:
              outer_grad_ctxt.Exit()
            # Use the zeros for iterations > 0.
            grad_state.grad_context.Enter()
            next_grad_val = control_flow_ops._NextIteration(grad_val)
            grad_state.grad_context.Exit()
          b_merge.op._update_input(1, next_grad_val)


def MaybeCreateControlFlowState(between_op_list, between_ops,
                                colocate_gradients_with_ops):
  """Create the state for all the while loops involved in one gradients().

  We create a _ControlFlowState when there are while loops involved in
  gradients(). In gradients(), control flow logic is only invoked when
  the _ControlFlowState is not None.

  Note that this method modifies `between_op_list` and `between_ops`.
  """
  loop_state = None
  for op in between_op_list:
    if util.IsLoopExit(op):
      if loop_state is None:
        loop_state = _ControlFlowState()
      if colocate_gradients_with_ops:
        with ops.colocate_with(op):
          loop_state.AddWhileContext(op, between_op_list, between_ops)
      else:
        loop_state.AddWhileContext(op, between_op_list, between_ops)
  return loop_state


def _ZerosLikeV1(op, index):
  """Branch of ZerosLike for TF1."""
  val = op.outputs[index]
  op_ctxt = op._get_control_flow_context()
  if op_ctxt:
    # We are in a cond context. Use a switch to create zeros only when needed.
    pred = op_ctxt.pred
    branch = op_ctxt.branch
    switch_val = control_flow_ops.switch(op.inputs[0], pred)[1 - branch]
    # An op is created along the branch taken, as control dependencies are on
    # the whole op and not on the tensor output.
    pivot = array_ops.identity(switch_val)
    if val.dtype == dtypes.resource:
      with ops.control_dependencies([pivot]):
        return array_ops.zeros(
            gen_resource_variable_ops.variable_shape(switch_val),
            dtype=default_gradient.get_zeros_dtype(val))
    zeros_shape = array_ops.shape_internal(switch_val, optimize=False)
    # Ensure ops created within array_ops.zeros are dominated by the switch in
    # the cond context.
    with ops.control_dependencies([pivot]):
      return array_ops.zeros(zeros_shape, dtype=val.dtype)
  else:
    return array_ops.zeros_like(val, optimize=False)


def _ZerosLikeV2(op, index):
  """Branch of ZerosLike for TF2."""
  val = op.outputs[index]
  if val.dtype == dtypes.resource:
    return array_ops.zeros(
        gen_resource_variable_ops.variable_shape(val),
        dtype=default_gradient.get_zeros_dtype(val))
  if (isinstance(val.op.graph, control_flow_v2_func_graphs.WhileBodyFuncGraph)
      and val.dtype != dtypes.variant):
    # In while_v2 we do not want to add a `ZerosLike` op because that would
    # trigger accumulation of `val`. Normally `ZerosLike` is preferred because
    # it helps avoid creating extra nodes (possibly Consts) for the shape.
    # For variants, we must use ZerosLike.
    if val.shape.is_fully_defined():
      return constant_op.constant(0, shape=val.shape.dims, dtype=val.dtype)
    else:
      # Even though we add `Shape` in the default graph, while_v2 is smart
      # enough to place it in the forward graph, i.e. val.graph.
      zeros_shape = array_ops.shape_internal(val, optimize=False)
      return array_ops.zeros(zeros_shape, val.dtype)
  else:
    return array_ops.zeros_like(val, optimize=False)


def ZerosLike(op, index):
  """Create zeros_like for the specified output of an op."""
  if not util.IsSwitch(op):
    return _ZerosLikeV2(op, index)
  else:
    return _ZerosLikeV1(op, index)
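

# Illustrative usage sketch (hedged; not part of this module's API). It shows
# the user-level scenario the classes above serve: taking a gradient through a
# TF1-style while loop. Only public `tf.compat.v1` symbols are used; variable
# names are arbitrary.
#
#   import tensorflow as tf
#   tf.compat.v1.disable_eager_execution()
#
#   x = tf.constant(2.0)
#   i0 = tf.constant(0)
#   _, y = tf.compat.v1.while_loop(
#       lambda i, acc: i < 3,
#       lambda i, acc: (i + 1, acc * x),
#       [i0, x])
#
#   # Building this gradient walks the graph between `y` and `x`, finds the
#   # while-loop Exit ops, and calls MaybeCreateControlFlowState() to build a
#   # _ControlFlowState (one _GradLoopState per forward loop) so that values
#   # from the forward loop can be accumulated on stacks and replayed in the
#   # backprop loop.
#   dy_dx = tf.gradients(y, x)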