"""Test utilities for tf.data benchmarking functionality."""
import time

import numpy as np

from tensorflow.python.client import session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import options as options_lib
from tensorflow.python.data.util import nest
from tensorflow.python.eager import context
from tensorflow.python.platform import test


class DatasetBenchmarkBase(test.Benchmark):
  """Base class for dataset benchmarks."""

  def _run_eager_benchmark(self, iterable, iters, warmup):
    """Benchmark the iterable in eager mode.

    Runs the iterable `iters` times. In each iteration, the benchmark measures
    the time it takes to go execute the iterable.

    Args:
      iterable: The tf op or tf.data Dataset to benchmark.
      iters: Number of times to repeat the timing.
      warmup: If true, warms up the session caches by running an untimed run.

    Returns:
      A float, representing the median time (with respect to `iters`)
      it takes for the iterable to be executed `iters` num of times.

    Raises:
      RuntimeError: When executed in graph mode.
    """
    deltas = []
    if not context.executing_eagerly():
      raise RuntimeError(
          "Eager mode benchmarking is not supported in graph mode.")

    for _ in range(iters):
      if warmup:
        iterator = iter(iterable)
        next(iterator)

      # Only the first `next()` call on a fresh iterator is timed, so callers
      # that want to time N elements fold them into a single step (see
      # `run_benchmark` below).
      iterator = iter(iterable)
      start = time.time()
      next(iterator)
      end = time.time()
      deltas.append(end - start)
    return np.median(deltas)
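
  # A minimal usage sketch (an illustrative assumption, not original code):
  # driving the eager helper directly on a small pipeline, assuming
  # `import tensorflow as tf` and an eager runtime:
  #
  #   benchmark = DatasetBenchmarkBase()
  #   dataset = tf.data.Dataset.range(100).batch(10)
  #   median_secs = benchmark._run_eager_benchmark(
  #       iterable=dataset, iters=10, warmup=True)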

  def _run_graph_benchmark(self,
                           iterable,
                           iters,
                           warmup,
                           session_config,
                           initializer=None):
    """Benchmarks the iterable in graph mode.

    Runs the iterable `iters` times. In each iteration, the benchmark measures
    the time it takes to go execute the iterable.

    Args:
      iterable: The tf op or tf.data Dataset to benchmark.
      iters: Number of times to repeat the timing.
      warmup: If true, warms up the session caches by running an untimed run.
      session_config: A ConfigProto protocol buffer with configuration options
        for the session. Applicable only for benchmarking in graph mode.
      initializer: The initializer op required to initialize the iterable.

    Returns:
      A float, representing the median time (with respect to `iters`)
      it takes for the iterable to be executed `iters` num of times.

    Raises:
      RuntimeError: When executed in eager mode.
    """
    deltas = []
    if context.executing_eagerly():
      raise RuntimeError(
          "Graph mode benchmarking is not supported in eager mode.")

    for _ in range(iters):
      with session.Session(config=session_config) as sess:
        if warmup:
          # Run an untimed pass first to warm up the session caches.
          if initializer:
            sess.run(initializer)
          sess.run(iterable)

        if initializer:
          sess.run(initializer)
        start = time.time()
        sess.run(iterable)
        end = time.time()

      deltas.append(end - start)
    return np.median(deltas)

  def run_op_benchmark(self, op, iters=1, warmup=True, session_config=None):
    """Benchmarks the op.

    Runs the op `iters` times. In each iteration, the benchmark measures
    the time it takes to go execute the op.

    Args:
      op: The tf op to benchmark.
      iters: Number of times to repeat the timing.
      warmup: If true, warms up the session caches by running an untimed run.
      session_config: A ConfigProto protocol buffer with configuration options
        for the session. Applicable only for benchmarking in graph mode.

    Returns:
      A float, representing the per-execution wall time of the op in seconds.
      This is the median time (with respect to `iters`) it takes for the op
      to be executed `iters` num of times.
    """
    if context.executing_eagerly():
      return self._run_eager_benchmark(iterable=op, iters=iters, warmup=warmup)

    return self._run_graph_benchmark(
        iterable=op,
        iters=iters,
        warmup=warmup,
        session_config=session_config)

  def run_benchmark(self,
                    dataset,
                    num_elements,
                    iters=1,
                    warmup=True,
                    apply_default_optimizations=False,
                    session_config=None):
    """Benchmarks the dataset.

    Runs the dataset `iters` times. In each iteration, the benchmark measures
    the time it takes to go through `num_elements` elements of the dataset.

    Args:
      dataset: Dataset to benchmark.
      num_elements: Number of dataset elements to iterate through each benchmark
        iteration.
      iters: Number of times to repeat the timing.
      warmup: If true, warms up the session caches by running an untimed run.
      apply_default_optimizations: Determines whether default optimizations
        should be applied.
      session_config: A ConfigProto protocol buffer with configuration options
        for the session. Applicable only for benchmarking in graph mode.

    Returns:
      A float, representing the per-element wall time of the dataset in seconds.
      This is the median time (with respect to `iters`) it takes for the dataset
      to go through `num_elements` elements, divided by `num_elements.`
    """
    options = options_lib.Options()
    options.experimental_optimization.apply_default_optimizations = (
        apply_default_optimizations)
    dataset = dataset.with_options(options)
    # Fold the `num_elements` iterations into a single step: after skipping
    # the first `num_elements - 1` elements, one `get_next()` call pulls all
    # of them through the pipeline, avoiding per-element measurement overhead.
    dataset = dataset.skip(num_elements - 1)

    if context.executing_eagerly():
      median_duration = self._run_eager_benchmark(
          iterable=dataset, iters=iters, warmup=warmup)
      return median_duration / float(num_elements)

    iterator = dataset_ops.make_initializable_iterator(dataset)
    next_element = iterator.get_next()
    op = nest.flatten(next_element)[0].op
    median_duration = self._run_graph_benchmark(
        iterable=op,
        iters=iters,
        warmup=warmup,
        session_config=session_config,
        initializer=iterator.initializer)

    return median_duration / float(num_elements)

  def run_and_report_benchmark(self,
                               dataset,
                               num_elements,
                               name,
                               iters=5,
                               extras=None,
                               warmup=True,
                               apply_default_optimizations=False,
                               session_config=None):
    """Benchmarks the dataset and reports the stats.

    Runs the dataset `iters` times. In each iteration, the benchmark measures
    the time it takes to go through `num_elements` elements of the dataset.
    This is followed by logging/printing the benchmark stats.

    Args:
      dataset: Dataset to benchmark.
      num_elements: Number of dataset elements to iterate through each benchmark
        iteration.
      name: Name of the benchmark.
      iters: Number of times to repeat the timing.
      extras: A dict which maps string keys to additional benchmark info.
      warmup: If true, warms up the session caches by running an untimed run.
      apply_default_optimizations: Determines whether default optimizations
        should be applied.
      session_config: A ConfigProto protocol buffer with configuration options
        for the session. Applicable only for benchmarking in graph mode.

    Returns:
      A float, representing the per-element wall time of the dataset in seconds.
      This is the median time (with respect to `iters`) it takes for the dataset
      to go through `num_elements` elements, divided by `num_elements.`
    )r/   r0   r   r   r+   r!   Nz{}.eagereagerimplementationz{}.graphgraphr0   )	wall_timer   nameextras)r1   r   r	   formatZreport_benchmark)
r   r/   r0   r7   r   r8   r   r+   r!   r6   r   r   r   run_and_report_benchmark   s(   !


z-DatasetBenchmarkBase.run_and_report_benchmark)N)r%   TN)r%   TFN)r2   NTFN)	__name__
__module____qualname____doc__r   r$   r(   r1   r:   r   r   r   r   r      s"    )

0
?r   )r>   r   numpyr   Ztensorflow.python.clientr   Ztensorflow.python.data.opsr   r   r)   Ztensorflow.python.data.utilr   Ztensorflow.python.eagerr   Ztensorflow.python.platformr   Z	Benchmarkr   r   r   r   r   <module>   s   