"""Implements the graph generation for computation of gradients."""

import collections
import contextlib

from tensorflow.core.framework import attr_value_pb2
from tensorflow.python import pywrap_tfe
from tensorflow.python.eager import backprop_util
from tensorflow.python.eager import context
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import composite_tensor_gradient
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor as tensor_lib
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_state
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import default_gradient
from tensorflow.python.ops import gen_functional_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops.unconnected_gradients import UnconnectedGradients
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import object_identity
from tensorflow.python.util import variable_utils
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.tf_export import tf_export


def _MarkReachedOps(from_ops, reached_ops, func_graphs):
  """Mark all ops reached from "from_ops".

  Args:
    from_ops: list of Operations.
    reached_ops: set of Operations.
    func_graphs: list of FuncGraphs. This method will traverse through
      these functions if they capture from_ops or any reachable ops.
  """
  queue = collections.deque()
  queue.extend(from_ops)
  while queue:
    op = queue.popleft()
    if op not in reached_ops:
      reached_ops.add(op)
      for output in op.outputs:
        if backprop_util.IsTrainable(output):
          queue.extend(_Consumers(output, func_graphs))


def _PendingCount(to_ops, from_ops, colocate_gradients_with_ops, func_graphs,
                  xs_set):
  """Initialize the pending count for ops between two lists of Operations.

  'pending_count[op]' indicates the number of backprop inputs
  to this operation.

  Args:
    to_ops: list of Operations.
    from_ops: list of Operations.
    colocate_gradients_with_ops: Python bool.  See docstring of gradients().
    func_graphs: list of FuncGraphs. This method will traverse through
      these functions if they capture from_ops or any reachable ops. This is
      useful if to_ops occur in a function and from_ops are in an outer
      function or graph.
    xs_set: ObjectIdentitySet of Tensors.

  Returns:
    A tuple containing: (1) the subset of to_ops reachable from from_ops by a
    path of zero or more backpropagatable tensors, (2) a mapping from operation
    to the number of backprop inputs to that op, and (3) a ControlFlowState
    object which is not None if the ops between from_ops and to_ops contain
    control flow loops.
  """
  # Mark every op reachable from from_ops by a path of backpropagatable
  # tensors.
  reached_ops = set()
  _MarkReachedOps(from_ops, reached_ops, func_graphs)
  reachable_to_ops = set(op for op in to_ops if op in reached_ops)

  # Mark the ops between from_ops and to_ops ("between ops").
  between_ops = set()
  between_op_list = []
  queue = collections.deque()
  queue.extend(to_ops)
  while queue:
    op = queue.popleft()
    if op in reached_ops:
      between_ops.add(op)
      between_op_list.append(op)
      # Clear the boolean so we won't add the inputs again.
      reached_ops.remove(op)
      for inp in _NonEagerInputs(op, xs_set):
        queue.append(inp.op)

  # 'loop_state' is None if there are no while loops.
  loop_state = control_flow_state.MaybeCreateControlFlowState(
      between_op_list, between_ops, colocate_gradients_with_ops)

  # Initialize pending count for between ops.
  pending_count = collections.defaultdict(int)
  for op in between_op_list:
    for x in _NonEagerInputs(op, xs_set):
      if x.op in between_ops:
        pending_count[x.op] += 1

  return reachable_to_ops, pending_count, loop_state


def _AsList(x):
  return x if isinstance(x, (list, tuple)) else [x]


def _DefaultGradYs(grad_ys,
                   ys,
                   colocate_gradients_with_ops,
                   gradient_uid="__unsupported__"):
  """Fill in default values for grad_ys.

  Args:
    grad_ys: List of gradients, can contain None.
    ys: List of tensors.
    colocate_gradients_with_ops: If True, try colocating gradients with
      the corresponding op.
    gradient_uid: A unique identifier within the graph indicating
      which invocation of gradients is being executed. Used to cluster
      ops for compilation.

  Returns:
    A list of gradients to use, without None.

  Raises:
    ValueError: If sizes of gradients and inputs don't match.
    TypeError: If type of any gradient is not valid for its input.
  """
  if len(grad_ys) != len(ys):
    raise ValueError(f"Length mismatch. Passed {len(grad_ys)} grad_ys for "
                     f"{len(ys)} ys")
  grad_ys = ops.convert_n_to_tensor_or_indexed_slices(grad_ys, name="grad_y")
  new_grad_ys = []
  for i, (y, grad_y) in enumerate(zip(ys, grad_ys)):
    with _maybe_colocate_with(y.op, gradient_uid, colocate_gradients_with_ops):
      if grad_y is None:
        if y.dtype.is_complex:
          raise TypeError(
              f"Gradients of complex tensors ({y}) must set grad_ys "
              f"(y.dtype = {dtypes.as_dtype(y.dtype).name})")
        new_grad_ys.append(
            array_ops.ones(
                array_ops.shape(y), dtype=y.dtype, name="grad_ys_%d" % i))
        continue
      if y.dtype.is_floating or y.dtype.is_integer:
        if not grad_y.dtype.is_floating and not grad_y.dtype.is_integer:
          raise TypeError(
              f"Gradient type {dtypes.as_dtype(grad_y.dtype).name} generated "
              f"for real or integer-valued tensor {y} with type "
              f"{dtypes.as_dtype(y.dtype).name} must be real or integer")
      elif y.dtype.is_complex:
        if not grad_y.dtype.is_complex:
          raise TypeError(
              f"Gradient type {dtypes.as_dtype(grad_y.dtype).name} generated "
              f"for complex-valued tensor {y} with type "
              f"{dtypes.as_dtype(y.dtype).name} must be real")
      elif y.dtype == dtypes.variant:
        if grad_y.dtype != dtypes.variant:
          raise TypeError(
              f"Gradient type {dtypes.as_dtype(grad_y.dtype).name} generated "
              f"for variant tensor {y} with type "
              f"{dtypes.as_dtype(y.dtype).name} must be variant")
      elif y.dtype == dtypes.resource:
        # The gradient of a resource tensor should be a numeric value, not
        # another resource.
        if grad_y.dtype == dtypes.resource:
          raise TypeError(f"Input gradient {grad_y} for resource tensor {y} "
                          "should not be a resource")
      else:
        raise TypeError(
            f"Tensor {y} with type {dtypes.as_dtype(y.dtype).name} must be "
            "numeric to obtain a default gradient")
      # Create a grad_y tensor in the name scope of the gradient.
      if isinstance(grad_y, indexed_slices.IndexedSlices):
        new_grad_ys.append(
            indexed_slices.IndexedSlices(
                indices=(array_ops.identity(
                    grad_y.indices, name="grad_ys_%d_indices" % i)
                         if isinstance(grad_y.indices, tensor_lib.Tensor) else
                         grad_y.indices),
                values=(array_ops.identity(
                    grad_y.values, name="grad_ys_%d_values" % i) if isinstance(
                        grad_y.values, tensor_lib.Tensor) else grad_y.values),
                dense_shape=(array_ops.identity(
                    grad_y.dense_shape, name="grad_ys_%d_shape" % i)
                             if isinstance(grad_y.dense_shape,
                                           tensor_lib.Tensor) else
                             grad_y.dense_shape)))
      else:
        new_grad_ys.append(
            array_ops.identity(grad_y, name="grad_ys_%d" % i))

  return new_grad_ys


def _VerifyGeneratedGradients(grads, op):
  """Verify that gradients are valid in number and type.

  Args:
    grads: List of generated gradients.
    op: Operation for which the gradients were generated.

  Raises:
    ValueError: if sizes of gradients and inputs don't match.
    TypeError: if type of any gradient is not valid for its input.
  """
  # While ops have inputs added to them during the gradient computation, so we
  # skip the check below for them.
  if op.type == "While" or op.type == "StatelessWhile":
    return

  if len(grads) != len(op.inputs):
    raise ValueError(f"Num gradients {len(grads)} generated for op "
                     f"{op.node_def} do not match num inputs {len(op.inputs)}")


def _StopOps(from_ops, stop_gradient_ops, pending_count, xs_set):
  """The set of ops that terminate the gradient computation.

  This computes the frontier of the forward graph *before* which backprop
  should stop. Operations in the returned set will not be differentiated.
  This set is defined as the subset of `from_ops` containing ops that have
  no predecessor in `from_ops`. `pending_count` is the result of
  `_PendingCount(xs, from_ops)`. An 'op' has predecessors in `from_ops`
  iff pending_count[op] > 0.

  In addition, none of `stop_gradient_ops` will be differentiated.

  Args:
    from_ops: list of Operations.
    stop_gradient_ops: list of Operations never to backprop through.
    pending_count: mapping from operation to number of backprop inputs.
    xs_set: ObjectIdentitySet of Tensors.

  Returns:
    The set of operations.
  """
  stop_ops = set()
  for op in from_ops:
    is_stop_op = True
    for inp in _NonEagerInputs(op, xs_set):
      if pending_count[inp.op] > 0:
        is_stop_op = False
        break
    if is_stop_op:
      stop_ops.add(op)
  stop_ops.update(op for op in stop_gradient_ops)
  return stop_ops


@contextlib.contextmanager
def _maybe_colocate_with(op, gradient_uid, colocate_gradients_with_ops):  # pylint: disable=invalid-name
  """Context to colocate with `op` if `colocate_gradients_with_ops`."""
  if colocate_gradients_with_ops:
    with ops._colocate_with_for_gradient(op, gradient_uid):  # pylint: disable=protected-access
      yield
  else:
    yield


def _IsPartitionedCall(op):
  return op.type == "PartitionedCall" or op.type == "StatefulPartitionedCall"


def _SymGrad(op, out_grads):
  """Backprop through a function call node op given its outputs' gradients."""
  f_in = [x for x in op.inputs] + out_grads
  f_types = [default_gradient.get_zeros_dtype(x) for x in op.inputs]
  f = attr_value_pb2.NameAttrList()
  if _IsPartitionedCall(op):
    f.name = op.get_attr("f").name
  else:
    f.name = op.type
  for k in op.node_def.attr:
    f.attr[k].CopyFrom(op.node_def.attr[k])
  in_grads = gen_functional_ops.symbolic_gradient(
      input=f_in, Tout=f_types, f=f)
  return in_grads


def _MaybeCompile(scope, op, func, grad_fn):
  """Compile the calculation in grad_fn if op was marked as compiled."""
  scope = scope.rstrip("/").replace("/", "_")
  if func is not None:
    xla_compile = func.cached_definition.attr["_XlaCompile"].b
    xla_separate_compiled_gradients = func.cached_definition.attr[
        "_XlaSeparateCompiledGradients"].b
    xla_scope = func.cached_definition.attr["_XlaScope"].s.decode()
  else:
    try:
      xla_compile = op.get_attr("_XlaCompile")
      xla_separate_compiled_gradients = op.get_attr(
          "_XlaSeparateCompiledGradients")
      xla_scope = op.get_attr("_XlaScope").decode()
    except ValueError:
      xla_compile = False

  if not xla_compile:
    return grad_fn()  # Exit early

  # If the gradients are supposed to be compiled separately, give them a
  # _XlaScope name that is based on the name_scope of the gradients. Otherwise
  # they just inherit the existing _XlaScope name, which lets them be merged
  # together with the non-gradient computation.
  if xla_separate_compiled_gradients:
    xla_grad_scope = "%s_grad_%s" % (xla_scope, scope)
  else:
    xla_grad_scope = xla_scope

  attrs = {
      "_XlaCompile": attr_value_pb2.AttrValue(b=xla_compile),
      "_XlaScope": attr_value_pb2.AttrValue(s=xla_grad_scope.encode())
  }
  with ops.get_default_graph()._attr_scope(attrs):  # pylint: disable=protected-access
    return grad_fn()


def _RaiseNoGradWrtInitialLoopValError(op, from_ops, xs_set):
  """Raises an error if we backprop through a loop var."""
  # Find the nearest 'to_op' reachable from 'op' to provide a helpful error.
  target_op = None
  queue = collections.deque([op])
  visited = set()
  while queue:
    curr_op = queue.popleft()
    if curr_op in visited:
      continue
    visited.add(curr_op)
    if curr_op in from_ops:
      target_op = curr_op
      break
    queue.extend(t.op for t in _NonEagerInputs(curr_op, xs_set))
  assert target_op
  raise ValueError(
      "Cannot compute gradient inside while loop with respect to op "
      f"'{target_op.name}'. We do not support taking the gradient wrt or "
      "through the initial value of a loop variable. Gradients can be "
      "computed through loop invariants or wrt the input parameters to the "
      "loop body.")


def _IsFunction(graph):
  # A FuncGraph is a Graph that is building a function.
  return isinstance(graph, ops.Graph) and graph._building_function  # pylint: disable=protected-access


def _Captures(func_graph):
  assert _IsFunction(func_graph)
  return func_graph.captures


def _MaybeCaptured(t):
  """If t is a captured value placeholder, returns the original captured value.

  Args:
    t: Tensor

  Returns:
    A tensor, potentially from a different Graph/FuncGraph.
  """
  # pylint: disable=protected-access
  if (not isinstance(t, ops.EagerTensor) and _IsFunction(t.op.graph) and
      t.op.type == "Placeholder"):
    for input_t, placeholder_t in _Captures(t.op.graph):
      if t is placeholder_t:
        return _MaybeCaptured(input_t)
  # pylint: enable=protected-access
  return t


def _NonEagerInputs(op, xs_set):
  """Returns the inputs of op, crossing closure boundaries where necessary.

  Does not return any captured EagerTensors, i.e., the number of tensors
  returned may be less than the actual number of inputs.

  Args:
    op: Operation
    xs_set: ObjectIdentitySet of Tensors we are differentiating w.r.t.

  Returns:
    A list of tensors. The tensors may be from multiple Graph/FuncGraphs if op
    is in a FuncGraph and has captured inputs.
  """
  return [t for t in _Inputs(op, xs_set)
          if not isinstance(t, ops.EagerTensor)]


def _Inputs(op, xs_set):
  """Returns the inputs of op, crossing closure boundaries where necessary.

  Args:
    op: Operation
    xs_set: ObjectIdentitySet of Tensors we are differentiating w.r.t.

  Returns:
    A list of tensors. The tensors may be from multiple Graph/FuncGraphs if op
    is in a FuncGraph and has captured inputs.
  """
  if _IsFunction(op.graph):
    inputs = []
    for t in op.inputs:
      # If we are differentiating w.r.t. `t`, do not traverse through it to a
      # captured value; the algorithm needs to "see" `t` in this case, even if
      # it is a function input for a captured value.
      if t not in xs_set:
        t = _MaybeCaptured(t)
      inputs.append(t)
    return inputs
  else:
    return op.inputs


def _Consumers(t, func_graphs):
  """Returns the consumers of t, crossing closure boundaries where necessary.

  Args:
    t: Tensor
    func_graphs: a list of FuncGraphs that may have captured t.

  Returns:
    A list of tensors. The tensors will be from the current graph and/or
    func_graphs.
  """
  consumers = t.consumers()
  for func in func_graphs:
    for input_t, placeholder in _Captures(func):
      if input_t is t:
        consumers.extend(_Consumers(placeholder, func_graphs))
  return consumers


def _GradientsHelper(ys,
                     xs,
                     grad_ys=None,
                     name="gradients",
                     colocate_gradients_with_ops=False,
                     gate_gradients=False,
                     aggregation_method=None,
                     stop_gradients=None,
                     unconnected_gradients=UnconnectedGradients.NONE,
                     src_graph=None):
  """Implementation of gradients()."""
  if context.executing_eagerly():
    raise RuntimeError("tf.gradients is not supported when eager execution "
                       "is enabled. Use tf.GradientTape instead.")
  ys = variable_utils.convert_variables_to_tensors(_AsList(ys))
  xs = [
      x.handle if resource_variable_ops.is_resource_variable(x) else x
      for x in _AsList(xs)
  ]
  if grad_ys is not None:
    grad_ys = _AsList(grad_ys)

  # Handle CompositeTensors by flattening them into their component tensors.
  if (any(isinstance(x, composite_tensor.CompositeTensor) for x in xs) or
      any(isinstance(y, composite_tensor.CompositeTensor) for y in ys)):
    flat_xs = composite_tensor_gradient.get_flat_tensors_for_gradients(xs)
    flat_ys = composite_tensor_gradient.get_flat_tensors_for_gradients(ys)
    flat_grad_ys = (
        None if grad_ys is None else
        composite_tensor_gradient.get_flat_tensors_for_gradients(grad_ys))
    flat_grads = _GradientsHelper(flat_ys, flat_xs, flat_grad_ys, name,
                                  colocate_gradients_with_ops, gate_gradients,
                                  aggregation_method, stop_gradients,
                                  unconnected_gradients, src_graph)
    return composite_tensor_gradient.replace_flat_tensors_for_gradients(
        xs, flat_grads)

  if src_graph is None:
    src_graph = ops.get_default_graph()
  try:
    unconnected_gradients = UnconnectedGradients(unconnected_gradients)
  except ValueError:
    raise ValueError(
        f"Unknown value for unconnected_gradients: '{unconnected_gradients}'")

  # If src_graph is a function body, gather it and all of its ancestor graphs.
  # This is necessary for correctly handling captured values.
  func_graphs = []
  curr_graph = src_graph
  while _IsFunction(curr_graph):
    func_graphs.append(curr_graph)
    curr_graph = curr_graph.outer_graph

  stop_gradients = [] if stop_gradients is None else _AsList(stop_gradients)
  if grad_ys is None:
    grad_ys = [None] * len(ys)

  with ops.name_scope(
      name, "gradients",
      list(ys) + list(xs) + list(stop_gradients) +
      list(grad_ys)) as grad_scope:
    # Get a uid for this call to gradients that can be used to help cluster
    # ops for compilation.
    gradient_uid = ops.get_default_graph().unique_name("uid")
    ys = ops.convert_n_to_tensor_or_indexed_slices(ys, name="y")
    xs = ops.internal_convert_n_to_tensor_or_indexed_slices(
        xs, name="x", as_ref=True)
    xs_set = object_identity.ObjectIdentitySet(xs)
    grad_ys = _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops,
                             gradient_uid)

    # The approach taken here: create a list of all ops in the subgraph
    # between the ys and xs.  Visit these ops in reverse order of ids to
    # ensure that when we visit an op the gradients w.r.t its outputs have
    # been collected.  Then aggregate these gradients if needed, call the op's
    # gradient function, and add the generated gradients to the gradients for
    # its inputs.

    # Initialize the pending count for ops in the connected subgraph from ys
    # to the xs.
    to_ops = [t.op for t in ys]
    from_ops = [t.op for t in xs]
    stop_gradient_ops = [t.op for t in stop_gradients]
    reachable_to_ops, pending_count, loop_state = _PendingCount(
        to_ops, from_ops, colocate_gradients_with_ops, func_graphs, xs_set)

    # grads: op => list of gradients received on each output endpoint of the
    # op.  The gradients for each endpoint are initially collected as a list.
    # When it is time to call the op's gradient function, for each endpoint we
    # aggregate the list of received gradients into one value if there is more
    # than one.
    grads = {}

    # Add the initial gradients for the ys.
    for y, grad_y in zip(ys, grad_ys):
      _SetGrad(grads, y, grad_y)

    # Initialize the queue with to_ops.
    queue = collections.deque()
    to_ops_set = set()
    for op in to_ops:
      # 'ready' handles the case where one output gradient relies on another
      # output's gradient.
      ready = (pending_count[op] == 0)
      if ready and op not in to_ops_set and op in reachable_to_ops:
        to_ops_set.add(op)
        queue.append(op)

    if loop_state:
      loop_exits = loop_state.ProcessUnusedLoopExits(pending_count, to_ops_set)
      for y in loop_exits:
        if backprop_util.IsTrainable(y):
          _SetGrad(grads, y, loop_state.ZerosLikeForExit(y))
          queue.append(y.op)

    stop_ops = _StopOps(from_ops, stop_gradient_ops, pending_count, xs_set)
    while queue:
      # Generate the gradient subgraph for op.
      op = queue.popleft()
      with _maybe_colocate_with(op, gradient_uid, colocate_gradients_with_ops):
        if loop_state:
          loop_state.EnterGradWhileContext(op, before=True)
        out_grads = _AggregatedGrads(grads, op, gradient_uid, loop_state,
                                     aggregation_method)
        if loop_state:
          loop_state.ExitGradWhileContext(op, before=True)

        grad_fn = None
        func_call = None
        is_partitioned_call = _IsPartitionedCall(op)
        # pylint: disable=protected-access
        is_func_call = (
            src_graph._is_function(op.type) or is_partitioned_call)
        # pylint: enable=protected-access
        has_out_grads = any(
            isinstance(g, tensor_lib.Tensor) or g for g in out_grads)
        if has_out_grads and (op not in stop_ops):
          try:
            grad_fn = ops.get_gradient_function(op)
          except LookupError:
            if is_func_call:
              if is_partitioned_call:
                func_name = compat.as_bytes(op.get_attr("f").name)
                func_call = src_graph._get_function(func_name)  # pylint: disable=protected-access
                # When a graph is imported, the FunctionDefs are not copied
                # over to each sub-graph, so recursively search the outer
                # graphs for the FunctionDef.
                if not func_call and hasattr(src_graph, "outer_graph"):
                  graph = src_graph.outer_graph
                  while graph is not None:
                    func_call = graph._get_function(func_name)  # pylint: disable=protected-access
                    if func_call is not None:
                      break
                    if hasattr(graph, "outer_graph"):
                      graph = graph.outer_graph
                    else:
                      break
              else:
                func_call = src_graph._get_function(op.type)  # pylint: disable=protected-access
              # __defun is not set if the graph is imported. If it is set, we
              # prefer to access the original defun.
              func_call = getattr(op, "__defun", func_call)
              grad_fn = func_call.python_grad_func
            else:
              raise LookupError(
                  "No gradient defined for operation "
                  f"'{op.name}' (op type: {op.type}). In general every "
                  "operation must have an associated `@tf.RegisterGradient` "
                  "for correct autodiff, which this op is lacking. If you "
                  "want to pretend this operation is a constant in your "
                  "program, you may insert `tf.stop_gradient`. This can be "
                  "useful to silence the error in cases where you know "
                  "gradients are not needed, e.g. the forward pass of "
                  "tf.custom_gradient. Please see more details in "
                  "https://www.tensorflow.org/api_docs/python/tf/custom_gradient.")
        if loop_state:
          loop_state.EnterGradWhileContext(op, before=False)

        # We do not support computing gradients wrt a loop variable unless it
        # is within the context of a single iteration (i.e. the gradient is
        # wrt the loop parameter in the body function, not wrt or through the
        # initial value). This means that if we are in a while loop context we
        # should never see a switch node from this context.
        # pylint: disable=protected-access
        if (control_flow_util.IsSwitch(op) and
            op._control_flow_context is not None and
            op._control_flow_context.IsWhileContext() and
            op._control_flow_context ==
            ops.get_default_graph()._get_control_flow_context()):
          _RaiseNoGradWrtInitialLoopValError(op, from_ops, xs_set)
        # pylint: enable=protected-access

        if (grad_fn or is_func_call) and has_out_grads:
          # NOTE: If _AggregatedGrads didn't compute a value for the i'th
          # output, it means that the cost does not depend on output[i],
          # therefore dC/doutput[i] is 0.
          for i, out_grad in enumerate(out_grads):
            if (not isinstance(out_grad, tensor_lib.Tensor) and not out_grad
               ) and ((not grad_fn and is_func_call) or
                      backprop_util.IsTrainable(op.outputs[i])):
              # Only trainable outputs or outputs for a function call that
              # will use SymbolicGradient get a zero gradient. Gradient
              # functions should ignore the gradient for other outputs.
              if loop_state:
                out_grads[i] = loop_state.ZerosLikeV1WhileLoop(op, i)
              elif default_gradient.supports_default_grad(op.outputs[i]):
                # The supports_default_grad check is needed because While ops
                # emit non-differentiable resource tensors.
                out_grads[i] = control_flow_state.ZerosLike(op, i)
          with ops.name_scope(op.name + "_grad"):
            # pylint: disable=protected-access
            with src_graph._original_op(op):
              # pylint: enable=protected-access
              if grad_fn:
                # If grad_fn was found, do not use SymbolicGradient even for
                # functions.
                in_grads = _MaybeCompile(grad_scope, op, func_call,
                                         lambda: grad_fn(op, *out_grads))
              else:
                # For function call ops, add a 'SymbolicGradient' node to the
                # graph to compute gradients.
                in_grads = _MaybeCompile(grad_scope, op, func_call,
                                         lambda: _SymGrad(op, out_grads))
              in_grads = _AsList(in_grads)
              _VerifyGeneratedGradients(in_grads, op)
              if gate_gradients and len([x for x in in_grads
                                         if x is not None]) > 1:
                with ops.device(None):
                  with ops._colocate_with_for_gradient(  # pylint: disable=protected-access
                      None, gradient_uid, ignore_existing=True):
                    in_grads = control_flow_ops.tuple(in_grads)
          _LogOpGradients(op, out_grads, in_grads)
        else:
          # If no grad_fn is defined or none of out_grads is available, just
          # propagate a list of None backwards.
          in_grads = [None] * len(_Inputs(op, xs_set))
        # Note: eager inputs are not filtered out here because the inputs need
        # to line up with in_grads.
        for i, (t_in, in_grad) in enumerate(zip(_Inputs(op, xs_set),
                                                in_grads)):
          if in_grad is not None:
            if (isinstance(in_grad, tensor_lib.Tensor) and
                t_in.dtype != dtypes.resource):
              try:
                in_grad.set_shape(t_in.get_shape())
              except ValueError:
                raise ValueError(
                    "Incompatible shapes between op input and calculated "
                    f"input gradient. Forward operation: {op.name}. Input "
                    f"index: {i}. Original input shape: {t_in.shape}. "
                    f"Calculated input gradient shape: {in_grad.shape}")
            if not isinstance(t_in, ops.EagerTensor):
              _SetGrad(grads, t_in, in_grad)
        if loop_state:
          loop_state.ExitGradWhileContext(op, before=False)

      # Update pending count for the inputs of op and enqueue ready ops.
      _UpdatePendingAndEnqueueReady(grads, op, queue, pending_count,
                                    loop_state, xs_set)

  if loop_state:
    loop_state.PostProcessing()
  return [_GetGrad(grads, x, unconnected_gradients) for x in xs]


def _HasAnyNotNoneGrads(grads, op):
  """Return true iff op has real gradient."""
  out_grads = _GetGrads(grads, op)
  for out_grad in out_grads:
    if isinstance(out_grad,
                  (tensor_lib.Tensor, indexed_slices.IndexedSlices)):
      return True
    if out_grad and isinstance(out_grad, collections_abc.Sequence):
      if any(g is not None for g in out_grad):
        return True
  return False


def _UpdatePendingAndEnqueueReady(grads, op, queue, pending_count, loop_state,
                                  xs_set):
  """Update pending count for the inputs of op and enqueue ready ops."""
  for x in _NonEagerInputs(op, xs_set):
    pending_count[x.op] -= 1
    ready = (pending_count[x.op] == 0)
    if loop_state and not ready:
      ready = pending_count[x.op] > 0 and control_flow_util.IsLoopSwitch(x.op)
    if ready:
      if control_flow_util.IsLoopExit(x.op):
        # If x is an exit without real gradient, defer processing it.
        grad_state = loop_state.GetGradState(x.op, before=False)
        grad_state.deferred_exits.append(x)
        grad_state.pending_exits_count -= 1
        if grad_state.pending_exits_count == 0:
          # We now have all the exits, so process them.
          has_not_none_grad = False
          for y in grad_state.deferred_exits:
            if _HasAnyNotNoneGrads(grads, y.op):
              has_not_none_grad = True
              queue.append(y.op)
            else:
              grad_state.unused_exits.append(y)
          if has_not_none_grad:
            # For an unused exit, if it has trainable outputs, backprop a zero
            # gradient. Otherwise, just ignore it.
            for y in grad_state.unused_exits:
              if backprop_util.IsTrainable(y):
                _SetGrad(grads, y, loop_state.ZerosLikeForExit(y))
              queue.append(y.op)
          else:
            # All exits are "unused", so use None as gradient.
            for y in grad_state.unused_exits:
              queue.append(y.op)
      else:
        queue.append(x.op)


def _SetGrad(grads, t, grad):
  """Sets gradient "grad" in "grads" for tensor "t"."""
  op = t.op
  op_grads = grads.get(op)
  if not op_grads:
    op_grads = [[] for _ in range(len(op.outputs))]
    grads[op] = op_grads
  t_grads = op_grads[t.value_index]
  if isinstance(t_grads, list):
    t_grads.append(grad)
  else:
    assert control_flow_util.IsLoopSwitch(op)
    op_grads[t.value_index] = grad


def _ZerosLike(t):
  t_dtype = default_gradient.get_zeros_dtype(t)
  if t.dtype == dtypes.resource:
    return array_ops.zeros(
        resource_variable_ops.variable_shape(t), dtype=t_dtype)
  else:
    return array_ops.zeros_like(t, dtype=t_dtype)


def _GetGrad(grads, t, unconnected_gradients):
  """Gets gradient for tensor "t"."""
  op = t.op
  op_grads = grads.get(op)
  if not op_grads:
    if unconnected_gradients == UnconnectedGradients.ZERO:
      return _ZerosLike(t)
    elif unconnected_gradients == UnconnectedGradients.NONE:
      return None
    else:
      raise ValueError(
          f"Unknown value for unconnected_gradients: "
          f"'{unconnected_gradients}'")

  t_grad = op_grads[t.value_index]
  if unconnected_gradients == UnconnectedGradients.ZERO and t_grad is None:
    return _ZerosLike(t)

  assert not isinstance(
      t_grad, list), ("gradients list should have been aggregated by now.")
  return t_grad


def _GetGrads(grads, op):
  """Gets all gradients for op."""
  if op in grads:
    return grads[op]
  else:
    return [[] for _ in range(len(op.outputs))]


def _AccumulatorShape(inputs):
  shape = tensor_shape.unknown_shape()
  for i in inputs:
    if isinstance(i, tensor_lib.Tensor):
      shape = shape.merge_with(i.get_shape())
  return shape


def _LogOpGradients(op, out_grads, in_grads):
  """Log the in and out grads of an op."""
  logging.vlog(1, "Gradient for '" + op.name + "'")

  def _FilterGrad(x):
    if x is None:
      return False
    if isinstance(x, (list, tuple)):
      return bool(x)
    else:
      return True

  logging.vlog(1, "  in  --> %s",
               ", ".join(t.name for t in out_grads if _FilterGrad(t)))
  logging.vlog(1, "  out --> %s",
               ", ".join(t.name for t in in_grads if _FilterGrad(t)))


def _MultiDeviceAddN(tensor_list, gradient_uid):
  """Adds tensors from potentially multiple devices."""
  # Basic function structure: maintain a dict of tensors grouped by device,
  # add the tensors on each device first, then add up the partial sums.
  tensors_on_device = collections.defaultdict(lambda: [])
  for t in tensor_list:
    tensors_on_device[t.device].append(t)

  summands = []

  def DeviceKey(dev):
    return "" if dev is None else dev

  for dev in sorted(tensors_on_device, key=DeviceKey):
    tensors = tensors_on_device[dev]
    with ops._colocate_with_for_gradient(  # pylint: disable=protected-access
        tensors[0].op, gradient_uid, ignore_existing=True):
      summands.append(math_ops.add_n(tensors))

  return math_ops.add_n(summands)


@tf_export("AggregationMethod")
class AggregationMethod:
  """A class listing aggregation methods used to combine gradients.

  Computing partial derivatives can require aggregating gradient
  contributions. This class lists the various methods that can
  be used to combine gradients in the graph.

  The following aggregation methods are part of the stable API for
  aggregating gradients:

  *  `ADD_N`: All of the gradient terms are summed as part of one
     operation using the "AddN" op (see `tf.add_n`). This
     method has the property that all gradients must be ready and
     buffered separately in memory before any aggregation is performed.
  *  `DEFAULT`: The system-chosen default aggregation method.

  The following aggregation methods are experimental and may not
  be supported in future releases:

  * `EXPERIMENTAL_TREE`: Gradient terms are summed in pairs using
    the "AddN" op. This method of summing gradients may reduce
    performance, but it can improve memory utilization because the
    gradients can be released earlier.
  * `EXPERIMENTAL_ACCUMULATE_N`: Same as `EXPERIMENTAL_TREE`.

  Example usage when computing gradient:

  >>> @tf.function
  ... def example():
  ...   x = tf.constant(1.0)
  ...   y = x * 2.0
  ...   z = y + y + y + y
  ...   return tf.gradients(z, [x, y],
  ...     aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)
  >>> example()
  [<tf.Tensor: shape=(), dtype=float32, numpy=8.0>,
   <tf.Tensor: shape=(), dtype=float32, numpy=4.0>]

  """
  ADD_N = 0
  DEFAULT = ADD_N
  EXPERIMENTAL_TREE = 1
  EXPERIMENTAL_ACCUMULATE_N = 2


def _AggregatedGrads(grads,
                     op,
                     gradient_uid,
                     loop_state,
                     aggregation_method=None):
  """Get the aggregated gradients for op.

  Args:
    grads: The map of memoized gradients.
    op: The op to get gradients for.
    gradient_uid: A unique identifier within the graph indicating
      which invocation of gradients is being executed. Used to cluster
      ops for compilation.
    loop_state: An object for maintaining the state of the while loops in the
      graph. It is of type ControlFlowState. None if the graph contains no
      while loops.
    aggregation_method: Specifies the method used to combine gradient terms.
      Accepted values are constants defined in the class `AggregationMethod`.

  Returns:
    A list of gradients, one per each output of `op`. If the gradients
      for a particular output is a list, this function aggregates it
      before returning.

  Raises:
    TypeError: if the incoming grads are not Tensors or IndexedSlices.
    ValueError: if the arguments are invalid.
  """
  if aggregation_method is None:
    aggregation_method = AggregationMethod.DEFAULT
  valid_aggregation_methods = [
      AggregationMethod.ADD_N, AggregationMethod.EXPERIMENTAL_TREE,
      AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
  ]
  if aggregation_method not in valid_aggregation_methods:
    raise ValueError(
        f"Invalid `aggregation_method` specified {aggregation_method}. "
        f"Accepted values are {valid_aggregation_methods}.")
  out_grads = _GetGrads(grads, op)
  for i, out_grad in enumerate(out_grads):
    if loop_state:
      if isinstance(out_grad,
                    (tensor_lib.Tensor, indexed_slices.IndexedSlices)):
        assert control_flow_util.IsLoopSwitch(op)
        continue
    # Grads have to be Tensors or IndexedSlices.
    if (isinstance(out_grad, collections_abc.Sequence) and not all(
        isinstance(g, (tensor_lib.Tensor, indexed_slices.IndexedSlices))
        for g in out_grad
        if g is not None)):
      raise TypeError(f"Invalid gradient {out_grad} [index = {i}]. Gradients "
                      "have to be either all Tensors or all IndexedSlices")
    # Aggregate multiple gradients, and convert [] to None.
    if out_grad:
      if len(out_grad) < 2:
        used = "nop"
        out_grads[i] = out_grad[0]
      elif all(
          isinstance(g, tensor_lib.Tensor) for g in out_grad
          if g is not None):
        tensor_shape = _AccumulatorShape(out_grad)
        if aggregation_method in [
            AggregationMethod.EXPERIMENTAL_TREE,
            AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
        ]:
          # Aggregate all gradients by doing pairwise sums: this may reduce
          # performance, but it can improve memory utilization because the
          # gradients can be released earlier.
          used = "tree"
          with ops.name_scope(op.name + "_gradient_sum"):
            running_sum = out_grad[0]
            for grad in out_grad[1:]:
              running_sum = math_ops.add_n([running_sum, grad])
            out_grads[i] = running_sum
        else:
          used = "add_n"
          out_grads[i] = _MultiDeviceAddN(out_grad, gradient_uid)
        logging.vlog(2, "  _AggregatedGrads %d x %s using %s", len(out_grad),
                     tensor_shape, used)
      else:
        out_grads[i] = backprop_util.AggregateIndexedSlicesGradients(out_grad)
    else:
      # No gradients were collected for this output.
      out_grads[i] = None
  return out_grads


POSSIBLE_GRADIENT_TYPES_NONE = 0
POSSIBLE_GRADIENT_TYPES_FIRST_ORDER = 1
POSSIBLE_GRADIENT_TYPES_HIGHER_ORDER = 2


def PossibleTapeGradientTypes(tensors):
  """Determines whether and how `args` may require tape gradients."""
  return pywrap_tfe.TFE_Py_TapeSetPossibleGradientTypes(tensors)