Skip to content

Commit

Permalink
[major] support four more design spaces
Browse files Browse the repository at this point in the history
  • Loading branch information
Hanrui-Wang committed May 26, 2021
1 parent cce2319 commit 32bc78a
Show file tree
Hide file tree
Showing 2 changed files with 160 additions and 0 deletions.
3 changes: 3 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -322,3 +322,6 @@ runs/
/examples/noise_script/plot/None.None.None.node2.x_before_act_quant.pdf
/examples/noise_script/plot/None.None.None.node2.x_before_add_noise.error.pdf
/examples/noise_script/plot/None.None.None.node2.x_before_add_noise.pdf
/examples/noise_script/plot/cluster/
/activations/fashion.four0123.eval.santiago.real.opt2.noancilla.act_quant.300_loadop_s18000_valid.pt
/activations/fashion.four0123.eval.tq.act_quant.300_loadop_s18000_valid.pt
157 changes: 157 additions & 0 deletions torchquantum/layers.py
Original file line number Diff line number Diff line change
Expand Up @@ -519,9 +519,166 @@ def build_layers(self):
return layers_all


class SethLayer0(LayerTemplate0):
    """Ansatz that repeats a two-gate pattern per block.

    Each of the ``arch['n_blocks']`` blocks appends, in order:
      1. a circular all-to-neighbor RZZ layer (trainable, jump=1), then
      2. a trainable RY rotation on every wire.
    """

    def build_layers(self):
        # Collect sub-layers in a QuantumModuleList so parameters register.
        layers = tq.QuantumModuleList()
        for _ in range(self.arch['n_blocks']):
            layers.append(Op2QAllLayer(
                op=tq.RZZ, n_wires=self.n_wires,
                has_params=True, trainable=True,
                jump=1, circular=True))
            layers.append(Op1QAllLayer(
                op=tq.RY, n_wires=self.n_wires,
                has_params=True, trainable=True))
        return layers


class BarrenLayer0(LayerTemplate0):
    """Ansatz opening with an SHadamard layer on all wires.

    After the initial (non-trainable) SHadamard layer, each of the
    ``arch['n_blocks']`` blocks appends trainable RX, RY and RZ rotation
    layers followed by a nearest-neighbor CZ entangling layer (jump=1,
    non-circular).
    """

    def build_layers(self):
        layers = tq.QuantumModuleList()
        # One fixed SHadamard layer before all blocks.
        layers.append(Op1QAllLayer(op=tq.SHadamard, n_wires=self.n_wires))
        for _ in range(self.arch['n_blocks']):
            # Trainable single-qubit rotations, in RX -> RY -> RZ order.
            for rot_op in (tq.RX, tq.RY, tq.RZ):
                layers.append(Op1QAllLayer(
                    op=rot_op, n_wires=self.n_wires,
                    has_params=True, trainable=True))
            # Parameter-free CZ entangler between neighboring wires.
            layers.append(Op2QAllLayer(
                op=tq.CZ, n_wires=self.n_wires, jump=1))
        return layers


class FarhiLayer0(LayerTemplate0):
    """Ansatz built purely from two-qubit rotation layers.

    Each of the ``arch['n_blocks']`` blocks appends a circular RZX layer
    followed by a circular RXX layer, both trainable with jump=1.
    """

    def build_layers(self):
        layers = tq.QuantumModuleList()
        for _ in range(self.arch['n_blocks']):
            # RZX first, then RXX — both ring-coupled across all wires.
            for pair_op in (tq.RZX, tq.RXX):
                layers.append(Op2QAllLayer(
                    op=pair_op, n_wires=self.n_wires,
                    has_params=True, trainable=True,
                    jump=1, circular=True))
        return layers


class MaxwellLayer0(LayerTemplate0):
    """Ansatz with three rotation/fixed/entangler triples plus a U1+CU3 tail.

    Each of the ``arch['n_blocks']`` blocks appends, in order:
      1. trainable RX layer, fixed S layer, circular CNOT entangler;
      2. trainable RY layer, fixed T layer, circular SWAP entangler;
      3. trainable RZ layer, fixed Hadamard layer, circular SSWAP entangler;
      4. trainable U1 layer and a trainable circular CU3 entangler.
    All two-qubit layers use jump=1.
    """

    # Per-block (trainable rotation, fixed gate, two-qubit entangler) triples,
    # applied in this exact order inside every block.
    _TRIPLES = (
        ('RX', 'S', 'CNOT'),
        ('RY', 'T', 'SWAP'),
        ('RZ', 'Hadamard', 'SSWAP'),
    )

    def build_layers(self):
        layers = tq.QuantumModuleList()
        for _ in range(self.arch['n_blocks']):
            for rot_name, fixed_name, ent_name in self._TRIPLES:
                layers.append(Op1QAllLayer(
                    op=getattr(tq, rot_name), n_wires=self.n_wires,
                    has_params=True, trainable=True))
                layers.append(Op1QAllLayer(
                    op=getattr(tq, fixed_name), n_wires=self.n_wires))
                layers.append(Op2QAllLayer(
                    op=getattr(tq, ent_name), n_wires=self.n_wires,
                    jump=1, circular=True))

            # Tail of each block: trainable U1 then trainable circular CU3.
            layers.append(Op1QAllLayer(
                op=tq.U1, n_wires=self.n_wires,
                has_params=True, trainable=True))
            layers.append(Op2QAllLayer(
                op=tq.CU3, n_wires=self.n_wires,
                has_params=True, trainable=True,
                jump=1, circular=True))

        return layers


# Registry mapping design-space names (as used in configs) to layer classes.
# All keys are valid identifiers, so the keyword form builds the same mapping.
layer_name_dict = dict(
    u3cu3_0=U3CU3Layer0,
    cu3_0=CU3Layer0,
    cxrzsx_0=CXRZSXLayer0,
    seth_0=SethLayer0,
    barren_0=BarrenLayer0,
    farhi_0=FarhiLayer0,
    maxwell_0=MaxwellLayer0,
)

0 comments on commit 32bc78a

Please sign in to comment.