gradient tests passing
Antonio Martinez committed Apr 5, 2021
1 parent 4fdd6cd commit e9352de
Showing 2 changed files with 73 additions and 66 deletions.
12 changes: 8 additions & 4 deletions tensorflow_quantum/python/differentiators/parameter_shift.py
@@ -79,8 +79,9 @@ def get_gradient_circuits(self, programs, symbol_names, symbol_values):
         # then reshape to the correct batch size
         batch_programs = tf.reshape(
             tf.transpose(new_programs, [1, 0, 2, 3]), [n_programs, m_tile])
-        weights = tf.reshape(
-            tf.transpose(weights, [1, 0, 2, 3]), [n_programs, n_symbols, n_param_gates * n_shifts])
+        batch_weights = tf.reshape(
+            tf.transpose(weights, [1, 0, 2, 3]),
+            [n_programs, n_symbols, n_param_gates * n_shifts])
         shifts = tf.reshape(
             tf.transpose(shifts, [1, 0, 2, 3]), [n_programs, m_tile, 1])
@@ -95,9 +96,12 @@ def get_gradient_circuits(self, programs, symbol_names, symbol_values):
             tf.expand_dims(symbol_values, 1), [1, m_tile, 1])
         batch_symbol_values = tf.concat([tiled_symbol_values, shifts], 2)

-        batch_mapper = tf.tile(tiled_expectation)
+        single_program_mapper = tf.reshape(
+            tf.range(n_symbols * n_param_gates * n_shifts),
+            [n_symbols, n_param_gates * n_shifts])
+        batch_mapper = tf.tile(tf.expand_dims(single_program_mapper, 0), [n_programs, 1, 1])

-        return (batch_programs, new_symbol_names, batch_symbol_values, batch_mapper)
+        return (batch_programs, new_symbol_names, batch_symbol_values, batch_weights, batch_mapper)

     @differentiator.catch_empty_inputs
     @tf.function
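The net effect of this hunk: get_gradient_circuits now returns five tensors instead of four, with batch_weights carrying the parameter-shift coefficients and batch_mapper indexing each symbol's shifted circuits within a program's expectation batch. A minimal NumPy sketch of that contract follows; the sizes and random values are illustrative stand-ins, not taken from the commit.

    import numpy as np

    # Illustrative sizes only; none of these numbers come from the commit.
    n_programs, n_symbols, n_param_gates, n_shifts, n_ops = 2, 3, 1, 2, 1
    m_tile = n_symbols * n_param_gates * n_shifts  # shifted circuits per program

    # Stand-ins for three of the returned/measured tensors.
    batch_weights = np.random.rand(n_programs, n_symbols, n_param_gates * n_shifts)
    batch_mapper = np.tile(
        np.arange(m_tile).reshape(1, n_symbols, n_param_gates * n_shifts),
        (n_programs, 1, 1))
    # Pretend these are expectations measured on the shifted circuits.
    batch_expectations = np.random.rand(n_programs, m_tile, n_ops)

    # Per program: gather each symbol's shifted expectations through the
    # mapper, apply the shift weights, and sum out the shift axis.
    grads = np.empty((n_programs, n_symbols, n_ops))
    for i in range(n_programs):
        gathered = batch_expectations[i][batch_mapper[i]]  # [symbols, shifts, ops]
        grads[i] = np.einsum('km,kmp->kp', batch_weights[i], gathered)
    print(grads.shape)  # (2, 3, 1)

The tf.map_fn added to the test below performs this same gather-then-einsum, one program at a time.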
127 changes: 65 additions & 62 deletions tensorflow_quantum/python/differentiators/parameter_shift_test.py
@@ -175,7 +175,7 @@ def test_get_gradient_circuits(self):


         (test_batch_programs, test_new_symbol_names, test_batch_symbol_values,
-         test_batch_mapper) = diff.get_gradient_circuits(
+         test_batch_weights, test_batch_mapper) = diff.get_gradient_circuits(
             input_programs, input_symbol_names, input_symbol_values)
         for i in range(tf.shape(input_programs)[0]):
             self.assertAllEqual(util.from_tensor(expected_batch_programs[i]),
@@ -186,70 +186,73 @@ def test_get_gradient_circuits(self):
                             atol=1e-6)
         # self.assertAllClose(expected_batch_mapper, test_batch_mapper, atol=1e-6)

-    # @parameterized.parameters(
-    #     list(
-    #         util.kwargs_cartesian_product(
-    #             **{
-    #                 'differentiator': [
-    #                     parameter_shift.ParameterShift(),
-    #                 ],
-    #                 'n_qubits': [5],
-    #                 'n_programs': [3],
-    #                 'n_ops': [3],
-    #                 'symbol_names': [['a', 'b']]
-    #             })))
-    # def test_gradient_circuits_grad_comparison(self, differentiator, n_qubits,
-    #                                            n_programs, n_ops, symbol_names):
-    #     """Test that analytic gradient agrees with the one from grad circuits"""
-    #     # Get random circuits to check.
-    #     qubits = cirq.GridQubit.rect(1, n_qubits)
-    #     circuit_batch, resolver_batch = \
-    #         util.random_symbol_circuit_resolver_batch(
-    #             cirq.GridQubit.rect(1, n_qubits), symbol_names, n_programs)
-    #     psums = [
-    #         util.random_pauli_sums(qubits, 1, n_ops) for _ in circuit_batch
-    #     ]
+    @parameterized.parameters(
+        list(
+            util.kwargs_cartesian_product(
+                **{
+                    'differentiator': [
+                        parameter_shift.ParameterShift(),
+                    ],
+                    'n_qubits': [5],
+                    'n_programs': [3],
+                    'n_ops': [3],
+                    'symbol_names': [['a', 'b']]
+                })))
+    def test_gradient_circuits_grad_comparison(self, differentiator, n_qubits,
+                                               n_programs, n_ops, symbol_names):
+        """Test that analytic gradient agrees with the one from grad circuits"""
+        # Get random circuits to check.
+        qubits = cirq.GridQubit.rect(1, n_qubits)
+        circuit_batch, resolver_batch = \
+            util.random_symbol_circuit_resolver_batch(
+                cirq.GridQubit.rect(1, n_qubits), symbol_names, n_programs)
+        psums = [
+            util.random_pauli_sums(qubits, 1, n_ops) for _ in circuit_batch
+        ]

-    #     # Convert to tensors.
-    #     symbol_names_array = np.array(symbol_names)
-    #     symbol_values_array = np.array(
-    #         [[resolver[symbol]
-    #           for symbol in symbol_names]
-    #          for resolver in resolver_batch],
-    #         dtype=np.float32)
-    #     symbol_names_tensor = tf.convert_to_tensor(symbol_names_array)
-    #     symbol_values_tensor = tf.convert_to_tensor(symbol_values_array)
-    #     programs = util.convert_to_tensor(circuit_batch)
-    #     ops_tensor = util.convert_to_tensor(psums)
+        # Convert to tensors.
+        symbol_names_array = np.array(symbol_names)
+        symbol_values_array = np.array(
+            [[resolver[symbol]
+              for symbol in symbol_names]
+             for resolver in resolver_batch],
+            dtype=np.float32)
+        symbol_names_tensor = tf.convert_to_tensor(symbol_names_array)
+        symbol_values_tensor = tf.convert_to_tensor(symbol_values_array)
+        programs = util.convert_to_tensor(circuit_batch)
+        ops_tensor = util.convert_to_tensor(psums)

-    #     # Get gradients using expectations of gradient circuits.
-    #     (batch_programs, new_symbol_names, batch_symbol_values,
-    #      batch_mapper) = differentiator.get_gradient_circuits(
-    #          programs, symbol_names_tensor, symbol_values_tensor)
-    #     analytic_op = circuit_execution_ops.get_expectation_op()
-    #     batch_pauli_sums = tf.tile(tf.expand_dims(ops_tensor, 1),
-    #                                [1, tf.shape(batch_mapper)[2], 1])
-    #     n_batch_programs = tf.reduce_prod(tf.shape(batch_programs))
-    #     n_symbols = len(symbol_names)
-    #     batch_expectations = analytic_op(
-    #         tf.reshape(batch_programs, [n_batch_programs]), new_symbol_names,
-    #         tf.reshape(batch_symbol_values, [n_batch_programs, n_symbols]),
-    #         tf.reshape(batch_pauli_sums, [n_batch_programs, n_ops]))
-    #     batch_expectations = tf.reshape(batch_expectations,
-    #                                     tf.shape(batch_pauli_sums))
-    #     grad_manual = tf.reduce_sum(
-    #         tf.einsum('ikm,imp->ikp', batch_mapper, batch_expectations), -1)
+        # Get gradients using expectations of gradient circuits.
+        (batch_programs, new_symbol_names, batch_symbol_values,
+         batch_weights, batch_mapper) = differentiator.get_gradient_circuits(
+             programs, symbol_names_tensor, symbol_values_tensor)
+        analytic_op = circuit_execution_ops.get_expectation_op()
+        batch_pauli_sums = tf.tile(tf.expand_dims(ops_tensor, 1),
+                                   [1, tf.shape(batch_programs)[1], 1])
+        n_batch_programs = tf.reduce_prod(tf.shape(batch_programs))
+        n_symbols = tf.shape(new_symbol_names)[0]
+        batch_expectations = analytic_op(
+            tf.reshape(batch_programs, [n_batch_programs]), new_symbol_names,
+            tf.reshape(batch_symbol_values, [n_batch_programs, n_symbols]),
+            tf.reshape(batch_pauli_sums, [n_batch_programs, n_ops]))
+        batch_expectations = tf.reshape(batch_expectations,
+                                        tf.shape(batch_pauli_sums))
+        batch_jacobian = tf.map_fn(
+            lambda x: tf.einsum('km,kmp->kp', x[0], tf.gather(x[1], x[2])),
+            (batch_weights, batch_expectations, batch_mapper),
+            fn_output_signature=tf.float32)
+        grad_manual = tf.reduce_sum(batch_jacobian, -1)

-    #     # Get gradients using autodiff.
-    #     differentiator.refresh()
-    #     differentiable_op = differentiator.generate_differentiable_op(
-    #         analytic_op=analytic_op)
-    #     with tf.GradientTape() as g:
-    #         g.watch(symbol_values_tensor)
-    #         exact_outputs = differentiable_op(programs, symbol_names_tensor,
-    #                                           symbol_values_tensor, ops_tensor)
-    #     grad_auto = g.gradient(exact_outputs, symbol_values_tensor)
-    #     self.assertAllClose(grad_manual, grad_auto)
+        # Get gradients using autodiff.
+        differentiator.refresh()
+        differentiable_op = differentiator.generate_differentiable_op(
+            analytic_op=analytic_op)
+        with tf.GradientTape() as g:
+            g.watch(symbol_values_tensor)
+            exact_outputs = differentiable_op(programs, symbol_names_tensor,
+                                              symbol_values_tensor, ops_tensor)
+        grad_auto = g.gradient(exact_outputs, symbol_values_tensor)
+        self.assertAllClose(grad_manual, grad_auto)


if __name__ == "__main__":
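The restored test closes the loop: shifted-circuit expectations, weighted and summed, must match autodiff through the differentiable op. The identity it leans on is the standard parameter-shift rule for rotation-style gates; a scalar sanity check of that rule, independent of TensorFlow Quantum and assuming the textbook pi/2 shift (not code from the commit):

    import numpy as np

    # <Z> after Rx(theta) acting on |0> is cos(theta); its derivative,
    # -sin(theta), must equal the +/- pi/2 shifted difference.
    def expectation(theta):
        return np.cos(theta)

    theta = 0.7
    shift = np.pi / 2.0
    grad_shift = (expectation(theta + shift) - expectation(theta - shift)) / 2.0
    assert np.isclose(grad_shift, -np.sin(theta))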
