"""Testing."""

from tensorflow.python.framework import test_util as _test_util
from tensorflow.python.platform import googletest as _googletest

# Re-exported test helpers.
# pylint: disable=unused-import
from tensorflow.python.framework.test_util import assert_equal_graph_def
from tensorflow.python.framework.test_util import create_local_cluster
from tensorflow.python.framework.test_util import TensorFlowTestCase as TestCase
from tensorflow.python.framework.test_util import gpu_device_name
from tensorflow.python.framework.test_util import is_gpu_available

from tensorflow.python.ops.gradient_checker import compute_gradient_error
from tensorflow.python.ops.gradient_checker import compute_gradient
# pylint: enable=unused-import

import functools
import sys

from tensorflow.python.util.tf_export import tf_export

if sys.version_info.major == 2:
  import mock  # pylint: disable=g-import-not-at-top,unused-import
else:
  from unittest import mock  # pylint: disable=g-import-not-at-top,unused-import

tf_export(v1=["test.mock"])(mock)

# Expose the Benchmark class under this module.
Benchmark = _googletest.Benchmark  # pylint: disable=invalid-name

# Expose the StubOutForTesting class under this module.
StubOutForTesting = _googletest.StubOutForTesting  # pylint: disable=invalid-name


@tf_export("test.main")
def main(argv=None):
  """Runs all unit tests."""
  _test_util.InstallStackTraceHandler()
  return _googletest.main(argv)


@tf_export(v1=["test.get_temp_dir"])
def get_temp_dir():
  """Returns a temporary directory for use during tests.

  There is no need to delete the directory after the test.

  @compatibility(TF2)
  This function is removed in TF2. Please use `TestCase.get_temp_dir` instead
  in a test case.
  Outside of a unit test, obtain a temporary directory through Python's
  `tempfile` module.
  @end_compatibility

  Returns:
    The temporary directory.
  )r   Z
GetTempDirr   r   r   r   get_temp_dir;   s   r   ztest.test_src_dir_pathc                 C   s
   t | S )zCreates an absolute test srcdir path given a relative path.

  Args:
    relative_path: a path relative to tensorflow root.
      e.g. "core/platform".

  Returns:
    An absolute path to the linked in runfiles.
  )r   test_src_dir_path)Zrelative_pathr   r   r   r   N   s   
r   ztest.is_built_with_cudac                   C   r   )a%  Returns whether TensorFlow was built with CUDA (GPU) support.

  This method should only be used in tests written with `tf.test.TestCase`. A
  typical usage is to skip tests that should only run with CUDA (GPU).

  >>> class MyTest(tf.test.TestCase):
  ...
  ...   def test_add_on_gpu(self):
  ...     if not tf.test.is_built_with_cuda():
  ...       self.skipTest("test is only applicable on GPU")
  ...
  ...     with tf.device("GPU:0"):
  ...       self.assertEqual(tf.math.add(1.0, 2.0), 3.0)

  TensorFlow official binary is built with CUDA.
  )r   ZIsGoogleCudaEnabledr   r   r   r   is_built_with_cuda\      r   ztest.is_built_with_rocmc                   C   r   )a)  Returns whether TensorFlow was built with ROCm (GPU) support.

  This method should only be used in tests written with `tf.test.TestCase`. A
  typical usage is to skip tests that should only run with ROCm (GPU).

  >>> class MyTest(tf.test.TestCase):
  ...
  ...   def test_add_on_gpu(self):
  ...     if not tf.test.is_built_with_rocm():
  ...       self.skipTest("test is only applicable on GPU")
  ...
  ...     with tf.device("GPU:0"):
  ...       self.assertEqual(tf.math.add(1.0, 2.0), 3.0)

  TensorFlow official binary is NOT built with ROCm.
  )r   ZIsBuiltWithROCmr   r   r   r   is_built_with_rocmq   r   r   ztest.disable_with_predicatec                    s    fdd}|S )z"Disables the test if pred is true.c                    s   t   fdd}|S )Nc                    s*    r
|   d S  | g|R i |S N)ZskipTest)selfargskwargs)funcpredskip_messager   r   wrapper_disable_with_predicate   s   zhdisable_with_predicate.<locals>.decorator_disable_with_predicate.<locals>.wrapper_disable_with_predicate)	functoolswraps)r   r"   r    r!   )r   r    decorator_disable_with_predicate   s   z@disable_with_predicate.<locals>.decorator_disable_with_predicater   )r    r!   r&   r   r%   r   disable_with_predicate   s   r'   ztest.is_built_with_gpu_supportc                   C   s   t  pt S )a9  Returns whether TensorFlow was built with GPU (CUDA or ROCm) support.

  This method should only be used in tests written with `tf.test.TestCase`. A
  typical usage is to skip tests that should only run with GPU.

  >>> class MyTest(tf.test.TestCase):
  ...
  ...   def test_add_on_gpu(self):
  ...     if not tf.test.is_built_with_gpu_support():
  ...       self.skipTest("test is only applicable on GPU")
  ...
  ...     with tf.device("GPU:0"):
  ...       self.assertEqual(tf.math.add(1.0, 2.0), 3.0)

  TensorFlow official binary is built with CUDA GPU support.
  )r   r   r   r   r   r   is_built_with_gpu_support   s   r(   ztest.is_built_with_xlac                   C   r   )a`  Returns whether TensorFlow was built with XLA support.

  This method should only be used in tests written with `tf.test.TestCase`. A
  typical usage is to skip tests that should only run with XLA.

  >>> class MyTest(tf.test.TestCase):
  ...
  ...   def test_add_on_xla(self):
  ...     if not tf.test.is_built_with_xla():
  ...       self.skipTest("test is only applicable on XLA")

  ...     @tf.function(jit_compile=True)
  ...     def add(x, y):
  ...       return tf.math.add(x, y)
  ...
  ...     self.assertEqual(add(tf.ones(()), tf.ones(())), 2.0)

  TensorFlow official binary is built with XLA.
  )r   ZIsBuiltWithXLAr   r   r   r   is_built_with_xla   s   r)   r   )#__doc__Ztensorflow.python.frameworkr   r   Ztensorflow.python.platformr   r   Z%tensorflow.python.framework.test_utilr   r   r   ZTestCaser   r   Z&tensorflow.python.ops.gradient_checkerr	   r
   r#   sysZ tensorflow.python.util.tf_exportr   version_infomajorr   ZunittestZ	BenchmarkZStubOutForTestingr   r   r   r   r   r'   r(   r)   r   r   r   r   <module>   sF   









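

# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, kept as comments so nothing runs at import
# time): a minimal example of how the helpers exported above are typically
# combined in a test file. `SquareTest` and `test_square_on_gpu` are
# hypothetical names, not part of this module.
#
#   import tensorflow as tf
#
#   class SquareTest(tf.test.TestCase):
#
#     @tf.test.disable_with_predicate(
#         pred=not tf.test.is_built_with_gpu_support(),
#         skip_message="Requires a CUDA or ROCm build of TensorFlow.")
#     def test_square_on_gpu(self):
#       with tf.device("GPU:0"):
#         self.assertAllEqual(tf.square([2, 3]), [4, 9])
#
#   if __name__ == "__main__":
#     tf.test.main()
# ---------------------------------------------------------------------------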