The length of an arrow in the Bloch sphere
I'm trying to build something called bloch sphere. This represents a state of a quantum bits in the form of an arrow in a sphere, whose radius is 1.0.
I wrote the codes below.
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
from itertools import product, combinations
print("Put angle theta and phi, 0≤theta≤180, 0≤phi≤360")
theta = input("theta:")
phi = input("phi:")
theta = float(theta)
phi = float(phi)
X = np.sin(phi)
Y = np.sin(theta)
Z = np.cos(theta)
class quantum_gates:
def __init__(self,X,Y,Z):
self.X = float(X)
self.Y = float(Y)
self.Z = float(Z)
if theta <0 or theta >180 or phi < 0 or phi >360:
print("Put the value of angles again")
else:
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.set_aspect("equal")
u, v = np.mgrid[0:2*np.pi:20j, 0:np.pi:10j]
x = np.cos(u)*np.sin(v)
y = np.sin(u)*np.sin(v)
z = np.cos(v)
ax.set_xlabel('y')
ax.set_ylabel('x')
ax.set_zlabel('z')
ax.plot_wireframe(y, x, z, color="black")
ax.quiver(0,0,0,Y,X,Z,color="red",length=1.0)
When I put (theta, phi) = (30,0), the tip of the arrow reaches the surface of the sphere. However, when I put (theta,phi) = (30,30), the tip of the arrow goes outside of the sphere.
You can see the image of the current situation from the link below.
1 answer

I guess you perform transformation between coordinates in a wrong way.
X
,Y
, andZ
should be calculated as follows (wikipedia link):X = np.sin(theta) * np.cos(phi) Y = np.sin(theta) * np.sin(phi) Z = np.cos(theta)
Also, numpy trigonometric functions accept values in radians. So, theta should be in the range [0, pi], phi should be in the range [0, 2 * pi). To convert degrees to radians you may use
numpy.radians()
.
See also questions close to this topic

Load Validation Set into memory
I'm trying to train a DenseNet model on a set of patches. My Dataset has 20k images for training and 6k for evaluation.
This is my main function :
if __name__ == "__main__": for epoch in range(START_EPOCH, START_EPOCH+hp.epoch): adjust_learning_rate(optimizer,epoch) train(epoch, hp.wrong_save) #mining(epoch) valid(epoch)
I adjust the learning rate at each epoch due to learning rate decay, I run my training which loads my
trainloader
to the GPU and then I run a validation function on my validation set to check some metrics at each epoch.I have a Nvidia GTX1060 (6 GB) and I can load the train set without no problem, but when I load the
valloader
, I get:RuntimeError: CUDA out of memory. Tried to allocate 74.12 MiB (GPU 0; 5.93 GiB total capacity; 4.73 GiB already allocated; 75.06 MiB free; 19.57 MiB cached)
I would like to know what do you think about the best approach here. Is it possible to load my evaluation set not to the GPU and run the evaluation? Is there any workaround?
There are my train and valid functions:
# Optimization, Loss Function Init criterion = nn.BCELoss() optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=hp.momentum, weight_decay=hp.weight_decay) def train(epoch, wrong_save=False): ''' trian net using patches of slide. save csv file that has patch file name predicted incorrectly. Args: epoch (int): current epoch wrong_save (bool): If True, save the csv file that has patch file name predicted incorrectly ''' print('\nEpoch: %d' % epoch) net.train() train_loss = 0 correct = 0 total = 0 wrong_list = [] for batch_idx, (inputs, targets, filename) in enumerate(trainloader): if USE_CUDA: inputs = inputs.cuda() targets = torch.FloatTensor(np.array(targets).astype(float)).cuda() optimizer.zero_grad() inputs, targets = Variable(inputs), Variable(targets) outputs = net(inputs) outputs = torch.squeeze(outputs) loss = criterion(outputs, targets) loss.backward() optimizer.step() train_loss += loss.item() total += targets.size(0) batch_size = targets.shape[0] outputs += Variable((torch.ones(batch_size) * (THRESHOLD)).cuda()) outputs = torch.floor(outputs) correct += outputs.data.eq(targets.data).cpu().sum() filename_list = filename if wrong_save == True: for idx in range(len(filename_list)): if outputs.data[idx] != targets.data[idx]: wrong_name = filename_list[idx] wrong_list.append(wrong_name) progress_bar(batch_idx, len(trainloader), 'Loss: %.3f  Acc: %.3f%% (%d/%d)' % (train_loss/(batch_idx+1), 100.*correct/total, correct, total)) if wrong_save == True: wrong_csv = open(cf.wrong_path+'wrong_data_epoch'+str(epoch)+'.csv','w',encoding='utf8') wr = csv.writer(wrong_csv) for name in wrong_list: wr.writerow([name]) wrong_csv.close() CUR_TRA_ACC.append(100.*correct/total) def valid(epoch): ''' valid net using patches of slide. Save checkpoint if AUC score is higher than saved checkpoint's. Args: epoch (int): current epoch ''' global BEST_AUC global THRESHOLD global LR_CHANCE global CK_CHANCE global LR_DECAY net.eval() valid_loss = 0 total = 0 correct = 0 outputs_list = np.array([]) targets_list = np.array([]) for batch_idx, (inputs, targets) in enumerate(valloader): if USE_CUDA: inputs = inputs.cuda() targets = torch.FloatTensor(np.array(targets).astype(float)).cuda() batch_size = targets.shape[0] inputs, targets = Variable(inputs, volatile=True), Variable(targets) outputs = net(inputs) total += targets.size(0) outputs = torch.squeeze(outputs) loss = criterion(outputs, targets) valid_loss += loss.item() _outputs = np.array(outputs.data.cpu()).astype(float) _targets = np.array(targets.data.cpu()).astype(float) outputs_list = np.append(outputs_list, _outputs) targets_list = np.append(targets_list, _targets) outputs += Variable((torch.ones(batch_size) * (1THRESHOLD)).cuda()) outputs = torch.floor(outputs) correct += int(outputs.eq(targets).cpu().sum()) progress_bar(batch_idx, len(valloader), 'Loss: %.3f  Acc: %.3f%% (%d/%d)' % (valid_loss/(batch_idx+1), 100.*correct/total, correct, total)) print(tp, tn, fp, fn) correct, tp, tn, fp, fn, recall, precision, specificity, f1_score, auc, threshold = stats(outputs_list, targets_list) acc = correct/total THRESHOLD = threshold print(tp, tn, fp, fn) print('Acc: %.3f, Recall: %.3f, Prec: %.3f, Spec: %.3f, F1: %.3f, Thres: %.3f, AUC: %.3f' %(acc, recall, precision, specificity, f1_score, threshold, auc)) print('%17s %12s\n%11s %8d %8d\n%11s %8d %8d' %('Tumor', 'Normal','pos',tp,fp,'neg',fn,tn)) print("lr: ",args.lr * (0.5 ** (LR_DECAY)), "lr chance:",LR_CHANCE) # plot data CUR_EPOCH.append(epoch) CUR_VAL_ACC.append(acc) CUR_LOSS.append(valid_loss/(batch_idx+1)) CUR_LR.append(args.lr * (0.5 ** (LR_DECAY))) # Save checkpoint. if auc > BEST_AUC: print('saving...') BEST_AUC = auc state = { 'net': net if USE_CUDA else net, 'acc': acc, 'loss': valid_loss, 'recall': recall, 'specificity': specificity, 'precision': precision, 'f1_score': f1_score, 'auc': auc, 'epoch': epoch, 'lr': args.lr * (0.5**(LR_DECAY)), 'threshold': threshold } torch.save(state, './checkpoint/ckpt.t7')
And this is how I construct my dataset:
def get_dataset(train_transform, test_transform, train_max, val_max, subtest_max, ratio=0, mining_mode=False): ''' dataset function to get train, valid, subtest, test, mining dataset Args: train_transform (torchvision.transforms): train set transform for data argumentation test_transform (torchvision.transfroms): test set transform for data argumentation train_max (int): limit of trian set val_max (int): limit of validation set subtest_max (int): limit of subtest set ratio (int): for mining_mode, inclusion ratio of train set compared mining set mining_mode (bool): If true, return mining dataset ''' train_dataset = camel(cf.dataset_path + 'train/', usage='train', limit = train_max, transform=train_transform) val_dataset = camel(cf.dataset_path + 'validation/', usage='val', limit = val_max, transform=test_transform) subtest_dataset = camel(cf.dataset_path + 'test/', usage='subtest', limit = subtest_max, transform=test_transform) test_dataset = camel(cf.test_path, usage ='test',transform=test_transform) if mining_mode == True: mining_dataset = camel(cf.dataset_path + 'mining/', usage='mining', train_ratio = ratio, transform=train_transform) return train_dataset, val_dataset, subtest_dataset, test_dataset, mining_dataset else: return train_dataset, val_dataset, subtest_dataset, test_dataset

VSCode running Python 2 instead of 3
I am using Python with VSCode, and if I use Cmd+Shift+P and type Run Code, it runs the code with Python2 even though I have specified it to use Python3. I have read this tutorial: How to force VSCode to use Python 3 instead of Python 2? but I do not understand how to change it. (and I can't comment because I don't have enough reputation yay) Could anyone help? Thanks!
Maybe it could be related to the fact that the mini terminal at the bottom runs "python u " instead of "python3 u "? Does anyone know how to change that?

Python  extra keyword(?) and inheritance
typing.py
(from Python 3.6.6 as bundled with Anaconda) declaresList
class as follows:class List(list, MutableSequence[T], extra=list):
As far as my understanding goes, it means that
List
class inherits fromlist
andMutableSequence[T]
). What is the meaning ofextra
assignment in the inheritance list? 
Grover's Algorithm: Behaviour when search element doesn't exist
I have a question about a particular behaviour in Grover's Algorithm. My question is what will happen in Grover's Algorithm if the element we are searching doesn't exist.

Gradient Descent huge loss function
I'm facing a nonbinary classification problem of this form:
 Input: 2dimensional vector (x,y) with 1 < x < 1, 1 < y < 1.
 Output: 4dimensional vector (p_0, p_1, p_2, p_3), where 0 < p_i < 1, and sum(p_i) <= 1 (of course, i = 0,...,3).
The program I use to classify them wants to simulate a 4qubit quantum circuit, that meaning that I start with a 16dimensional vector with a 1 as a first entry, and 0s elsewhere, and then I apply a series of rotations in the shape of matrix products.
Rephrasing it a bit: I start with said 16dimensional vector, and then I multiply it by a 16x16 matrix which depends on the point's first component "x", which renders a new 16dimensional vector. Next, I multiply this new vector by a different matrix, now having "y" as a parameter. I call this process the "encoding" of the data.
Once encoded, I use a set of matrices, depending on a different parameter each. A smart choice of these parameters is what will bring me the desired classification.
So, after every product is calculated, I end up with a new 16dimensional vector, which depends on every mentioned parameter, and we will call a(x,y).
From here I design a target function f(x,y) = (p_0, p_1, p_2, p_3). Each of the p_i's will be a sum of some of a(x,y)'s components.
Now, f(x,y) is the actual output I obtain for input (x,y). Let me call d(x,y) the desired output. My goal is to find a set of parameter values that makes f(x,y) be as close as possible to d(x,y) for a somewhat large amount of input data.
d(x,y) can take only one of four possible values:
 (1,0,0,0) dubbed as "0",
 (0,1,0,0) dubbed as "1",
 (0,0,1,0) dubbed as "2",
 (0,0,0,1) dubbed as "3".
The cost function I chose for this affair is a quadratic cost function. In order to minimize the cost function, I use a Gradient Descent algorithm. I compute the partial derivatives with a centered finite differences method.
So, now that the program is described, my real problem: With this configuration, I obtain pretty high Cost (loss) values, ranging from 1.5 to about 4.
In order to achieve these results, I run the Gradient Descent program for 30 times (epochs), with a learning rate of 1.
I'm used to having really small loss values (0.25 used to be a very bad result for a very similar problem), but I do still not have a good enough grip of what is actually going on behind the numbers as to know whether I should be too worried about this or not.
My program achieves a ~40% accuracy at its finest (trying several different sets of matrices) for a 1000 training points and a 1000 evaluating points.
I assume that a high loss value might mean that my program is just not good enough to perform this classification, but I do not know to what extend shall I be able to obtain better results.
Is there anything I'm doing utterly wrong, or is it just that this structure is not good enough for classifying?
Thank you very much for any feedback in advance.

TypeError: 'Line3DCollection' object is not iterable
I'm trying to create animation about how the value of a quantum bit would be changed by computation called Xgate, in form of an arrow. Here are the codes that I wrote.
#Import libraries from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt from matplotlib import animation import numpy as np #Accept input (theta, phi) from a user print("Put angle theta and phi, 0≤ theta ≤180, 0≤ phi ≤360") theta = input("theta:") phi = input("phi:") theta = np.radians(float(theta)) phi = np.radians(float(phi)) #Calculate x,y,z coordinates X = np.sin(theta) * np.cos(phi) Y = np.sin(theta) * np.sin(phi) Z = np.cos(theta) #Adjusting the length of an arrow length = np.sqrt(X**2 + Y**2 + Z**2) if length > 1: X = X/length Y = Y/length Z = Z/length # Figure of the animation fig = plt.figure() ax = fig.gca(projection='3d') ax.set_aspect("equal") u, v = np.mgrid[0:2*np.pi:20j, 0:np.pi:10j] x = np.cos(u)*np.sin(v) y = np.sin(u)*np.sin(v) z = np.cos(v) ax.set_xlabel('x') ax.set_ylabel('y') ax.set_zlabel('z') ax.plot_wireframe(x,y,z, color="black") # Calculate x,y,z coordinates in the process of the change length = 9 xgate_theta = np.linspace(theta,theta+np.pi,length) xgate_phi = np.linspace(phi,phi,length) #Array of x,y,z coordinates xgate= [] # Only x coordinates xgate_x = [] # Only y coordinates xgate_y = [] # Only z coordinates xgate_z = [] for i in range(length): xgate_x.append(X) xgate_z.append(np.cos(xgate_theta[i])) xgate_y.append(np.sqrt(1np.sqrt(xgate_x[i]**2+xgate_z[i]**2))*(1)) for j in range(length): xgate.append(plt.quiver(0,0,0,xgate_x[j],xgate_y[j],xgate_z[j],color="red")) ani = animation.ArtistAnimation(fig,xgate,interval=1000) plt.show()
Then, I got the following error.
Traceback (most recent call last): File "/Users/makotonakai/anaconda3/lib/python3.6/site packages/matplotlib/cbook/__init__.py", line 388, in process proxy(*args, **kwargs) File "/Users/makotonakai/anaconda3/lib/python3.6/sitepackages/matplotlib/cbook/__init__.py", line 228, in __call__ return mtd(*args, **kwargs) File "/Users/makotonakai/anaconda3/lib/python3.6/sitepackages/matplotlib/animation.py", line 1026, in _start self._init_draw() File "/Users/makotonakai/anaconda3/lib/python3.6/sitepackages/matplotlib/animation.py", line 1556, in _init_draw for artist in f: TypeError: 'Line3DCollection' object is not iterable
I cannot tell what line causes this error by just looking this error message. Can anybody tell me how I can fix this error?