Skip to content

Commit

Permalink
Enabled low_latency_op_mode logic. (tensorflow#337)
Browse files Browse the repository at this point in the history
* Enabled low_latency_op_mode logic.

* added snippet.

* removed accidental add.

* lint + format.

* strange format.

* fix wheel?

* Antonio feedback.

* missed rename.
MichaelBroughton authored Aug 21, 2020
1 parent 314c578 commit 3d6837a
Showing 8 changed files with 240 additions and 57 deletions.
1 change: 1 addition & 0 deletions release/BUILD
Original file line number Diff line number Diff line change
@@ -47,6 +47,7 @@ sh_binary(
"//tensorflow_quantum/python/layers/circuit_executors:unitary",
"//tensorflow_quantum/python/layers/high_level:controlled_pqc",
"//tensorflow_quantum/python/layers/high_level:pqc",
"//tensorflow_quantum/python:quantum_context",
"//tensorflow_quantum/python:util",
],
)
2 changes: 2 additions & 0 deletions scripts/import_test.py
Original file line number Diff line number Diff line change
@@ -36,7 +36,9 @@ def test_imports():

# Util functions.
_ = tfq.convert_to_tensor
_ = tfq.get_quantum_concurrent_op_mode
_ = tfq.from_tensor
_ = tfq.set_quantum_concurrent_op_mode
_ = tfq.util.get_supported_gates
_ = tfq.util.exponential

7 changes: 7 additions & 0 deletions tensorflow_quantum/__init__.py
Original file line number Diff line number Diff line change
@@ -25,6 +25,13 @@
# Re-label python module as layers module.
import tensorflow_quantum.python.layers as layers

# Import quantum_context getters and setters
from tensorflow_quantum.python.quantum_context import (
# Context setting functions.
get_quantum_concurrent_op_mode,
set_quantum_concurrent_op_mode,
)

# Import utility functions for tensor operations & conversions.
from tensorflow_quantum.python.util import (
# Utility functions
1 change: 1 addition & 0 deletions tensorflow_quantum/core/ops/BUILD
Original file line number Diff line number Diff line change
@@ -327,6 +327,7 @@ py_library(
"tfq_utility_ops_py",
":cirq_ops",
":tfq_simulate_ops_py",
"//tensorflow_quantum/python:quantum_context",
],
)

156 changes: 133 additions & 23 deletions tensorflow_quantum/core/ops/circuit_execution_ops.py
Original file line number Diff line number Diff line change
@@ -16,8 +16,13 @@
import enum

import cirq
import tensorflow as tf

from tensorflow_quantum.core.ops import (cirq_ops, tfq_simulate_ops,
tfq_utility_ops)
from tensorflow_quantum.python import quantum_context

_GLOBAL_OP_LOCK = tf.CriticalSection()


class TFQWavefunctionSimulator(enum.Enum):
@@ -28,7 +33,16 @@ class TFQWavefunctionSimulator(enum.Enum):
sampled_expectation = tfq_simulate_ops.tfq_simulate_sampled_expectation


def get_expectation_op(backend=None):
def _check_quantum_concurrent(quantum_concurrent):
if not isinstance(quantum_concurrent, bool):
raise TypeError("quantum_concurrent must be type bool."
" Given: {}".format(str(type(quantum_concurrent))))


def get_expectation_op(
backend=None,
*,
quantum_concurrent=quantum_context.get_quantum_concurrent_op_mode()):
"""Get a TensorFlow op that will calculate batches of expectation values.
This function produces a non-differentiable TF op that will calculate
@@ -67,9 +81,17 @@ def get_expectation_op(backend=None):
Args:
backend: Optional Python `object` that specifies what backend this op
should use when evaluating circuits. Can be any
`cirq.SimulatesFinalState`. If not provided the default C++ analytical
expectation calculation op is returned.
should use when evaluating circuits. Can be any
`cirq.SimulatesFinalState`. If not provided the default C++
analytical expectation calculation op is returned.
quantum_concurrent: Optional Python `bool`. True indicates that the
returned op should not block graph level parallelism on itself when
executing. False indicates that graph level parallelism on itself
should be blocked. Defaults to value specified in
`tfq.get_quantum_concurrent_op_mode` which defaults to True
(no blocking). This flag is only needed for advanced users when
using TFQ for very large simulations, or when running on a real
chip.
Returns:
A `callable` with the following signature:
@@ -97,11 +119,25 @@ def get_expectation_op(backend=None):
"""

# TODO (mbbrough): investigate how the above docstring renders.
_check_quantum_concurrent(quantum_concurrent)

op = None
if backend is None:
return TFQWavefunctionSimulator.expectation
op = TFQWavefunctionSimulator.expectation

if isinstance(backend, cirq.SimulatesFinalState):
return cirq_ops._get_cirq_analytical_expectation(backend)
op = cirq_ops._get_cirq_analytical_expectation(backend)

if op is not None:
if quantum_concurrent is True:
# Return an op that does not block graph level parallelism.
return lambda programs, symbol_names, symbol_values, pauli_sums: \
op(programs, symbol_names, symbol_values, pauli_sums)

# Return an op that does block graph level parallelism.
return lambda programs, symbol_names, symbol_values, pauli_sums: \
_GLOBAL_OP_LOCK.execute(lambda: op(
programs, symbol_names, symbol_values, pauli_sums))

if isinstance(backend, (cirq.SimulatesSamples, cirq.Sampler)):
raise NotImplementedError("Sample-based expectation is not supported."
@@ -112,7 +148,10 @@ def get_expectation_op(backend=None):
" or None.".format(backend))


def get_sampling_op(backend=None):
def get_sampling_op(
backend=None,
*,
quantum_concurrent=quantum_context.get_quantum_concurrent_op_mode()):
"""Get a Tensorflow op that produces samples from given quantum circuits.
This function produces a non-differentiable op that will calculate
@@ -142,6 +181,14 @@ def get_sampling_op(backend=None):
backend: Optional Python `object` that specifies what backend this op
should use when evaluating circuits. Can be any `cirq.Sampler`. If
not provided the default C++ sampling op is returned.
quantum_concurrent: Optional Python `bool`. True indicates that the
returned op should not block graph level parallelism on itself when
executing. False indicates that graph level parallelism on itself
should be blocked. Defaults to value specified in
`tfq.get_quantum_concurrent_op_mode` which defaults to True
(no blocking). This flag is only needed for advanced users when
using TFQ for very large simulations, or when running on a real
chip.
Returns:
A `callable` with the following signature:
@@ -168,21 +215,34 @@ def get_sampling_op(backend=None):
"""

# TODO (mbbrough): investigate how the above docstring renders.
_check_quantum_concurrent(quantum_concurrent)

op = None
if backend is None:
return lambda programs, symbol_names, symbol_values, num_samples: \
tfq_utility_ops.padded_to_ragged(TFQWavefunctionSimulator.samples(
programs, symbol_names, symbol_values, num_samples))
op = TFQWavefunctionSimulator.samples

if isinstance(backend, cirq.Sampler):
op = cirq_ops._get_cirq_samples(backend)

if op is not None:
if quantum_concurrent is True:
# Return an op that does not block graph level parallelism.
return lambda programs, symbol_names, symbol_values, num_samples: \
tfq_utility_ops.padded_to_ragged(
op(programs, symbol_names, symbol_values, num_samples))

if isinstance(backend, (cirq.Sampler)):
return lambda programs, symbol_names, symbol_values, num_samples: \
tfq_utility_ops.padded_to_ragged(cirq_ops._get_cirq_samples(backend)(
programs, symbol_names, symbol_values, num_samples))
_GLOBAL_OP_LOCK.execute(lambda: tfq_utility_ops.padded_to_ragged(
op(programs, symbol_names, symbol_values, num_samples)))

raise TypeError("Backend {} is invalid. Expected a Cirq.Sampler "
"or None.".format(backend))


def get_state_op(backend=None):
def get_state_op(
backend=None,
*,
quantum_concurrent=quantum_context.get_quantum_concurrent_op_mode()):
"""Get a TensorFlow op that produces states from given quantum circuits.
This function produces a non-differentiable op that will calculate
@@ -212,6 +272,14 @@ def get_state_op(backend=None):
should use when evaluating circuits. Can be any
`cirq.SimulatesFinalState`. If not provided, the default C++
wavefunction simulator will be used.
quantum_concurrent: Optional Python `bool`. True indicates that the
returned op should not block graph level parallelism on itself when
executing. False indicates that graph level parallelism on itself
should be blocked. Defaults to value specified in
`tfq.get_quantum_concurrent_op_mode` which defaults to True
(no blocking). This flag is only needed for advanced users when
using TFQ for very large simulations, or when running on a real
chip.
Returns:
A `callable` with the following signature:
@@ -235,22 +303,35 @@ def get_state_op(backend=None):
"""

# TODO (mbbrough): investigate how the above docstring renders.
_check_quantum_concurrent(quantum_concurrent)

op = None
if backend is None:
return lambda programs, symbol_names, symbol_values: \
tfq_utility_ops.padded_to_ragged(TFQWavefunctionSimulator.state(
programs, symbol_names, symbol_values))
op = TFQWavefunctionSimulator.state

if isinstance(backend, (cirq.SimulatesFinalState)):
op = cirq_ops._get_cirq_simulate_state(backend)

if op is not None:
if quantum_concurrent is True:
# Return an op that does not block graph level parallelism.
return lambda programs, symbol_names, symbol_values: \
tfq_utility_ops.padded_to_ragged(
op(programs, symbol_names, symbol_values))

# Return an op that does block graph level parallelism.
return lambda programs, symbol_names, symbol_values: \
tfq_utility_ops.padded_to_ragged(
cirq_ops._get_cirq_simulate_state(backend)(
programs, symbol_names, symbol_values))
_GLOBAL_OP_LOCK.execute(lambda: tfq_utility_ops.padded_to_ragged(
op(programs, symbol_names, symbol_values)))

raise TypeError("Backend {} is invalid. Expected a Cirq.SimulatesFinalState"
" or None.".format(backend))


def get_sampled_expectation_op(backend=None):
def get_sampled_expectation_op(
backend=None,
*,
quantum_concurrent=quantum_context.get_quantum_concurrent_op_mode()):
"""Get a TensorFlow op that will calculate sampled expectation values.
This function produces a non-differentiable TF op that will calculate
@@ -294,6 +375,14 @@ def get_sampled_expectation_op(backend=None):
backend: Optional Python `object` that specifies what backend this op
should use when evaluating circuits. Can be any `cirq.Sampler`. If
not provided the default C++ sampled expectation op is returned.
quantum_concurrent: Optional Python `bool`. True indicates that the
returned op should not block graph level parallelism on itself when
executing. False indicates that graph level parallelism on itself
should be blocked. Defaults to value specified in
`tfq.get_quantum_concurrent_op_mode` which defaults to True
(no blocking). This flag is only needed for advanced users when
using TFQ for very large simulations, or when running on a real
chip.
Returns:
A `callable` with the following signature:
@@ -324,11 +413,32 @@ def get_sampled_expectation_op(backend=None):
(after resolving the corresponding parameters in).
"""
# TODO (mbbrough): investigate how the above docstring renders.
_check_quantum_concurrent(quantum_concurrent)

op = None
if backend is None:
return TFQWavefunctionSimulator.sampled_expectation
op = TFQWavefunctionSimulator.sampled_expectation

if isinstance(backend, cirq.Sampler):
return cirq_ops._get_cirq_sampled_expectation(backend)
op = cirq_ops._get_cirq_sampled_expectation(backend)

if op is not None:
if quantum_concurrent is True:
# Return an op that does not block graph level parallelism.
return lambda programs, symbol_names, symbol_values, pauli_sums, \
num_samples: op(programs,
symbol_names,
symbol_values,
pauli_sums,
num_samples)

# Return an op that does block graph level parallelism.
return lambda programs, symbol_names, symbol_values, pauli_sums, \
num_samples: _GLOBAL_OP_LOCK.execute(lambda: op(programs,
symbol_names,
symbol_values,
pauli_sums,
num_samples))

raise TypeError(
"Backend {} is invalid. Expected a Cirq.Sampler or None.".format(
70 changes: 54 additions & 16 deletions tensorflow_quantum/core/ops/circuit_execution_ops_test.py
Original file line number Diff line number Diff line change
@@ -31,30 +31,50 @@
DM_SIM = cirq.sim.density_matrix_simulator.DensityMatrixSimulator()

EXPECTATION_OPS = [
circuit_execution_ops.get_expectation_op(backend=None),
circuit_execution_ops.get_expectation_op(backend=WF_SIM),
circuit_execution_ops.get_expectation_op(backend=DM_SIM)
circuit_execution_ops.get_expectation_op(backend=None,
quantum_concurrent=True),
circuit_execution_ops.get_expectation_op(backend=WF_SIM,
quantum_concurrent=True),
circuit_execution_ops.get_expectation_op(backend=DM_SIM,
quantum_concurrent=True),
# For timing interests, the C++ backend is also tested with quantum_concurrent=False.
circuit_execution_ops.get_expectation_op(backend=None,
quantum_concurrent=False)
]

SAMPLING_OPS = [
circuit_execution_ops.get_sampling_op(backend=None),
circuit_execution_ops.get_sampling_op(backend=WF_SIM),
circuit_execution_ops.get_sampling_op(backend=DM_SIM)
circuit_execution_ops.get_sampling_op(backend=None,
quantum_concurrent=True),
circuit_execution_ops.get_sampling_op(backend=WF_SIM,
quantum_concurrent=True),
circuit_execution_ops.get_sampling_op(backend=DM_SIM,
quantum_concurrent=True),
# For timing interests, the C++ backend is also tested with quantum_concurrent=False.
circuit_execution_ops.get_sampling_op(backend=None,
quantum_concurrent=False)
]

STATE_OPS = [
circuit_execution_ops.get_state_op(backend=None),
circuit_execution_ops.get_state_op(backend=WF_SIM),
circuit_execution_ops.get_state_op(backend=DM_SIM)
circuit_execution_ops.get_state_op(backend=None, quantum_concurrent=True),
circuit_execution_ops.get_state_op(backend=WF_SIM, quantum_concurrent=True),
circuit_execution_ops.get_state_op(backend=DM_SIM, quantum_concurrent=True),
# For timing interests, the C++ backend is also tested with quantum_concurrent=False.
circuit_execution_ops.get_state_op(backend=None, quantum_concurrent=False)
]

SAMPLED_EXPECTATION_OPS = [
circuit_execution_ops.get_sampled_expectation_op(backend=None),
circuit_execution_ops.get_sampled_expectation_op(backend=WF_SIM),
circuit_execution_ops.get_sampled_expectation_op(backend=DM_SIM)
circuit_execution_ops.get_sampled_expectation_op(backend=None,
quantum_concurrent=True),
circuit_execution_ops.get_sampled_expectation_op(backend=WF_SIM,
quantum_concurrent=True),
circuit_execution_ops.get_sampled_expectation_op(backend=DM_SIM,
quantum_concurrent=True),
# For timing interests, the C++ backend is also tested with quantum_concurrent=False.
circuit_execution_ops.get_sampled_expectation_op(backend=None,
quantum_concurrent=False),
]

SIMS = [WF_SIM, WF_SIM, DM_SIM]
SIMS = [WF_SIM, WF_SIM, DM_SIM, WF_SIM]


class OpGetterInputChecks(tf.test.TestCase):
@@ -78,6 +98,10 @@ def test_get_expectation_inputs(self):
TypeError, expected_regex="a Cirq.SimulatesFinalState"):
circuit_execution_ops.get_expectation_op(backend="junk")

with self.assertRaisesRegex(TypeError,
expected_regex="must be type bool."):
circuit_execution_ops.get_expectation_op(quantum_concurrent='junk')

def test_get_sampled_expectation_inputs(self):
"""Test that get expectation only accepts inputs it should."""
circuit_execution_ops.get_sampled_expectation_op()
@@ -93,6 +117,11 @@ def test_get_sampled_expectation_inputs(self):
with self.assertRaisesRegex(TypeError, expected_regex="a Cirq.Sampler"):
circuit_execution_ops.get_sampled_expectation_op(backend="junk")

with self.assertRaisesRegex(TypeError,
expected_regex="must be type bool."):
circuit_execution_ops.get_sampled_expectation_op(
quantum_concurrent='junk')

def test_get_samples_inputs(self):
"""Test that get_samples only accepts inputs it should."""
circuit_execution_ops.get_sampling_op()
@@ -108,6 +137,10 @@ def test_get_samples_inputs(self):
expected_regex="Expected a Cirq.Sampler"):
circuit_execution_ops.get_sampling_op(backend="junk")

with self.assertRaisesRegex(TypeError,
expected_regex="must be type bool."):
circuit_execution_ops.get_sampling_op(quantum_concurrent='junk')

def test_get_state_inputs(self):
"""Test that get_states only accepts inputs it should."""
circuit_execution_ops.get_state_op()
@@ -126,6 +159,10 @@ def test_get_state_inputs(self):
processor_id='test',
gate_set=cirq.google.XMON))

with self.assertRaisesRegex(TypeError,
expected_regex="must be type bool."):
circuit_execution_ops.get_state_op(quantum_concurrent='junk')


class ExecutionOpsConsistentyTest(tf.test.TestCase, parameterized.TestCase):
"""Test all ops produce equivalent output to one another."""
@@ -228,9 +265,10 @@ def test_simulate_state_with_symbols(self, op_and_sim, n_qubits,
list(
util.kwargs_cartesian_product(
**{
'op_and_sim': [(op, sim)
for (op,
sim) in zip(STATE_OPS[:-1], SIMS[:-1])],
'op_and_sim': [(op, sim) for (
op,
sim) in zip(STATE_OPS[:-2] +
[STATE_OPS[-1]], SIMS[:-2] + [SIMS[-1]])],
})))
def test_simulate_state_large(self, op_and_sim):
"""Test a reasonably large and complex circuit."""
50 changes: 37 additions & 13 deletions tensorflow_quantum/python/quantum_context.py
Original file line number Diff line number Diff line change
@@ -23,25 +23,24 @@ class QContext:
def __init__(self):
"""Create quantum context."""

# Currently unused property.
# ***Currently unused property.***
# Will control whether batch_util.py or engine_util.py will be hit.
self._engine_mode = False

# Currently unused property.
# Will control locking behavior on high latency ops.
self._low_latency_op_mode = True
self._quantum_concurrent_op_mode = True

def _get_engine_mode(self):
return self._engine_mode

def _set_engine_mode(self, mode):
self._engine_mode = mode

def _get_low_latency_op_mode(self):
return self._low_latency_op_mode
def _get_quantum_concurrent_op_mode(self):
return self._quantum_concurrent_op_mode

def _set_low_latency_op_mode(self, mode):
self._low_latency_op_mode = mode
def _set_quantum_concurrent_op_mode(self, mode):
self._quantum_concurrent_op_mode = mode


_Q_CONTEXT = None
@@ -78,11 +77,36 @@ def get_engine_mode():
return q_context()._get_engine_mode()


def set_low_latency_op_mode(mode):
"""Set the global op latency mode in execution context."""
q_context()._set_low_latency_op_mode(mode)
def set_quantum_concurrent_op_mode(mode):
"""Set the global op latency mode in execution context.
This is an advanced TFQ feature that should be used only in very specific
cases, namely when the memory requirements of a simulation are extremely
large OR when executing against a real chip.
def get_low_latency_op_mode():
"""Get the global op latency mode from execution context."""
return q_context()._get_low_latency_op_mode()
If you are going to make use of this function please call it at the top
of your module right after import:
>>> import tensorflow_quantum as tfq
>>> tfq.set_quantum_concurrent_op_mode(False)
Args:
mode: Python `bool` indicating whether or not circuit executing ops
should block graph level parallelism. Advanced users should
set `mode=False` when executing very large simulation workloads
or when executing against a real quantum chip.
"""
q_context()._set_quantum_concurrent_op_mode(mode)


def get_quantum_concurrent_op_mode():
"""Get the global op latency mode from execution context.
Returns:
Python `bool` indicating whether or not circuit execution ops
are blocking graph level parallelism with one another.
"""
return q_context()._get_quantum_concurrent_op_mode()
10 changes: 5 additions & 5 deletions tensorflow_quantum/python/quantum_context_test.py
Original file line number Diff line number Diff line change
@@ -50,12 +50,12 @@ def test_global_engine_mode(self):
mode = quantum_context.get_engine_mode()
self.assertTrue(mode)

def test_low_latency_op_mode(self):
"""Test getter an setter behavior for low_latency_op_mode."""
mode = quantum_context.get_low_latency_op_mode()
def test_quantum_concurrent_op_mode(self):
"""Test getter an setter behavior for quantum_concurrent_op_mode."""
mode = quantum_context.get_quantum_concurrent_op_mode()
self.assertTrue(mode)
quantum_context.set_low_latency_op_mode(False)
mode = quantum_context.get_low_latency_op_mode()
quantum_context.set_quantum_concurrent_op_mode(False)
mode = quantum_context.get_quantum_concurrent_op_mode()
self.assertFalse(mode)


0 comments on commit 3d6837a

Please sign in to comment.