|
- import tensorflow as tf
- import numpy as np
- import eta_functions as eta
- layers = tf.keras.layers
- """""
- This architecture is Representation part.
- """
-
-
class EncoderLayer(tf.keras.Model):
    """Single convolutional stage: Conv2D, optionally followed by a ReLU.

    Args:
        filters: number of Conv2D output channels.
        kernel_size: Conv2D kernel size.
        strides_s: Conv2D strides (default 2).
        add: when True, append a ReLU activation after the convolution.
        padding_s: Conv2D padding mode (default 'same').
    """

    def __init__(self, filters, kernel_size, strides_s=2, add=False, padding_s='same'):
        super(EncoderLayer, self).__init__()
        initializer = tf.random_normal_initializer(mean=0., stddev=0.1)
        conv = layers.Conv2D(
            filters=filters,
            kernel_size=kernel_size,
            strides=strides_s,
            padding=padding_s,
            kernel_initializer=initializer,
            kernel_regularizer=tf.keras.regularizers.l1(0.000001),
            use_bias=True,
        )
        # Build the stack once; the previous `self.encoder_layer = None`
        # placeholder and duplicated Sequential branches were dead weight.
        stages = [conv]
        if add:
            stages.append(layers.Activation('relu'))
        self.encoder_layer = tf.keras.Sequential(stages)

    def call(self, x):
        """Apply the conv (+ optional ReLU) stack to `x`."""
        return self.encoder_layer(x)
-
-
class generator_layer(tf.keras.Model):
    """One unfolded denoiser stage: encoder -> soft threshold -> decoder.

    `call` returns both the denoised estimate and the mean input/output
    derivative term (dx/dr) used by the LAMP Onsager correction.
    """

    def __init__(self, user_num):
        super(generator_layer, self).__init__()

        # 1x1-conv encoder applied to the input estimate.
        self.encoder_layers = [
            EncoderLayer(filters=64, kernel_size=(1, 1), strides_s=(1, 1), add=True),
            EncoderLayer(filters=64, kernel_size=(1, 1), strides_s=(1, 1), add=False),
        ]

        # 1x1-conv decoder mapping back to a 2-channel output.
        self.decoder_layers = [
            EncoderLayer(filters=64, kernel_size=(1, 1), strides_s=(1, 1), add=True),
            EncoderLayer(filters=2, kernel_size=(1, 1), strides_s=(1, 1), add=False),
        ]

        # Learnable per-user soft-threshold level.
        self.thresh = tf.Variable(0.01 * tf.ones(user_num), dtype=tf.float32)

    def call(self, r):
        tape_input = r

        with tf.GradientTape() as tape:
            # Watch the input so tape.gradient can differentiate w.r.t. it.
            tape.watch(tape_input)
            out = tape_input
            for enc in self.encoder_layers:
                out = enc(out)
            out = eta.soft_thresh(out, self.thresh)
            for dec in self.decoder_layers:
                out = dec(out)

        # Non-persistent tape: a single gradient call after exiting the context.
        grads = tape.gradient(out, tape_input)
        del tape

        # Average the derivative proxy over all but the batch axis.
        dxdr = tf.reduce_mean(grads, [-1, -2, -3])

        return out, dxdr
-
class Generator(tf.keras.Model):
    """Unfolded LAMP network: `unfold` denoiser stages with Onsager correction.

    Args:
        A: complex measurement matrix (numpy array, shape (D, N)).
        user_num: per-stage soft-threshold dimension (number of users).
        unfold: number of unfolded LAMP iterations/stages.
    """

    def __init__(self, A, user_num, unfold):
        super(Generator, self).__init__()
        self.unfold = unfold
        # One denoiser stage per unfolded iteration. The original wrote into
        # locals()['Layer' + str(t)], which is not a supported way to bind
        # names inside a function and only worked incidentally; a plain list
        # comprehension is equivalent and reliable.
        self.ISIT_layer = [generator_layer(user_num) for _ in range(unfold)]

        # Trainable real/imaginary parts of A.
        self.Ar = np.real(A)
        self.Ai = np.imag(A)
        self.Ar_v = tf.Variable(self.Ar, dtype=tf.float32)
        self.Ai_v = tf.Variable(self.Ai, dtype=tf.float32)
        self.A = A

        # Fixed (non-trainable) conjugate transpose of A, i.e. B = A^H.
        self.B_r = tf.Variable(self.Ar.T, dtype=tf.float32, trainable=False)
        self.B_i = tf.Variable(-self.Ai.T, dtype=tf.float32, trainable=False)

        # Per-iteration step sizes; not read in call() here — kept for
        # checkpoint/interface compatibility.
        self.u = tf.Variable(0.1 * tf.ones(unfold - 1), dtype=tf.float32)

    def call(self, x):
        """Run the unfolded LAMP recursion on measurements `x`."""
        return self.build_LAMP(x, self.Ar_v, self.Ai_v, self.unfold)

    def complex_mul(self, B_r, B_i, inputs, key):
        """Complex matrix product (B_r + j*B_i) @ inputs.

        `inputs[..., 0]` / `inputs[..., 1]` carry the real / imaginary parts.
        When key == 'inv', the conjugate transpose of B is applied instead.
        Returns the (real, imag) parts of the product.
        """
        if key == "inv":
            B_r = tf.transpose(B_r)
            B_i = -tf.transpose(B_i)

        # (a + jb)(c + jd) = (ac - bd) + j(ad + bc)
        real_part = tf.matmul(B_r, inputs[:, :, :, 0]) - tf.matmul(B_i, inputs[:, :, :, 1])
        imag_part = tf.matmul(B_r, inputs[:, :, :, 1]) + tf.matmul(B_i, inputs[:, :, :, 0])
        return real_part, imag_part

    def build_LAMP(self, inputs, A_r, A_i, unfold):
        """Unfolded LAMP recursion.

        `inputs` appears to be (batch, D, ante, 2) with real/imag stacked in
        the last axis — inferred from the indexing here; confirm with caller.
        Returns the final estimate xhat.
        """
        # Initial back-projection r0 = A^H y.
        By_r, By_i = self.complex_mul(self.B_r, self.B_i, inputs, 'no')
        By = tf.concat([By_r[..., tf.newaxis], By_i[..., tf.newaxis]], axis=-1)

        D, N = A_r.shape
        ante_num = inputs.shape[2]
        NOverM = tf.constant(float(N) / D, dtype=tf.float32)
        # NOTE(review): the original also computed an `rvar_` noise-variance
        # estimate that was never used; it has been removed.

        xhat, dxdr = self.ISIT_layer[0](By)
        vt_ = inputs
        for t in range(unfold - 1):
            # Residual with Onsager correction:
            # v_t = y - A xhat + (N/M) * dxdr * v_{t-1}
            Ax0, Ax1 = self.complex_mul(A_r, A_i, xhat, 'no')
            Ax = tf.concat([Ax0[..., tf.newaxis], Ax1[..., tf.newaxis]], axis=-1)
            dxdr_b = tf.tile(dxdr[..., tf.newaxis, tf.newaxis, tf.newaxis],
                             [1, D, ante_num, 2])
            vt_ = inputs - Ax + NOverM * tf.multiply(dxdr_b, vt_)

            # Back-project the residual and refine the estimate.
            bv1, bv2 = self.complex_mul(self.B_r, self.B_i, vt_, 'no')
            bv_ = tf.concat([bv1[..., tf.newaxis], bv2[..., tf.newaxis]], axis=-1)
            rhat_ = xhat + bv_

            xhat, dxdr = self.ISIT_layer[t + 1](rhat_)

        return xhat
|