Training star identification AI using PyTorch

We finished our previous article, TRAINING SET WITH STELLARIUM II, with the training set ready to go. It was up to Sebi to take the next step and do the actual star identification AI training. Sebi turned out to be somewhat reluctant to write it up as a post, so we ended up exploring a form we haven’t used before – an interview.

Jan: Where do we start, Sebi?

Sebi: I wanted to know which would be better for our job – PyTorch or TensorFlow – and after some initial research I picked PyTorch as the more popular one.

So I started by watching the following video tutorial about PyTorch.

After watching it, I wrote my first code based on that video. Here it is:

import torch
import torch.optim as optim
from torch.optim import lr_scheduler
from torchvision import models, transforms
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder
import os
import torch.nn as nn
import time
import copy
import signal
projectDir = os.path.dirname(os.path.realpath(__file__))+"/"
dataDir = projectDir+'../data/train_224_224_monochrome_big'
deviceName = "cuda:0" if torch.cuda.is_available() else "cpu"
device = torch.device(deviceName)
# training and testing sets: one folder per star class, loaded as single-channel tensors
trainData = ImageFolder(dataDir, transform=transforms.Compose([transforms.Grayscale(num_output_channels=1), transforms.ToTensor()]))
testData = ImageFolder(projectDir+'../data/train_224_224_monochrome', transform=transforms.Compose([transforms.Grayscale(num_output_channels=1), transforms.ToTensor()]))
modelName = "resnet18"
batchSize = 1000
logName = "resnet18.log"
model = models.resnet18(pretrained=False)  # ResNet18 architecture, trained from scratch (no pretrained weights)
model.conv1 = nn.Conv2d(1, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)  # 1-channel (grayscale) input
model.fc = nn.Linear(model.fc.in_features, len(trainData.classes))  # one output per star class
if os.path.exists(projectDir+modelName+".pth"):
  model.load_state_dict(torch.load(projectDir+modelName+".pth"))
def main():
  print(trainData.classes)
  save_model(model)
  trainDataLoader = DataLoader(trainData, batch_size=batchSize, shuffle=True, num_workers=3)
  testDataLoader = DataLoader(testData, batch_size=batchSize, shuffle=True, num_workers=3)
  model.to(device)
  criterion = nn.CrossEntropyLoss()
  optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
  scheduler = lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.8) # every step_size epochs multiply learning rate by gamma
  train_model(model, optimizer, criterion, scheduler, trainDataLoader, testDataLoader, 1, test=True)
def train_model(model, optimizer, criterion, scheduler, trainDataLoader, testDataLoader, numEpochs, test=False):
  start = time.time()
  best_model_wts = copy.deepcopy(model.state_dict())
  best_acc = 0.0
  best_train_acc = 0.0
  # confusion counters: running_incorrects[true star][predicted star]
  running_incorrects = {star:{other:0 for other in trainData.classes} for star in trainData.classes}
  if test:
    log("the next epochs are for testing:")
  for epoch in range(numEpochs):
    log('Epoch {}/{}'.format(epoch+1, numEpochs))
    log('-' * 10)
    for phase in ["train", "test"] if test == False else ["test"]:
      running_loss = 0.0
      running_corrects = 0
      if phase == "train":
        model.train()
      else:
        model.eval()
      for index, (inputs, labels) in enumerate(trainDataLoader if phase == "train" else testDataLoader):
        # progress line, overwritten in place: samples processed so far and running accuracy
        print('Batch {}/{}'.format((index+1)*len(labels), len(trainDataLoader if phase == "train" else testDataLoader)*len(labels))
              + ' Accuracy: {}/{} Percent: %{:.4f}'.format(running_corrects, len(trainData if phase == "train" else testData),
                (running_corrects / ((len(labels)*index) + 0.00001)) * 100), end="\r")
        inputs = inputs.to(device)
        labels = labels.to(device)
        with torch.set_grad_enabled(phase == "train"):
          outputs = model(inputs)
          _, preds = torch.max(outputs, 1)
          loss = criterion(outputs, labels)
          if phase == "train":
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        running_loss += loss.item() * inputs.size(0)
        running_corrects += torch.sum(preds == labels.data)
        for prediction, label in zip(preds, labels.data):
          if prediction != label:
            running_incorrects[trainData.classes[label.item()]][trainData.classes[prediction.item()]] += 1
      epoch_loss = running_loss / len(trainData if phase == "train" else testData)
      epoch_acc = running_corrects.double() / len(trainData if phase == "train" else testData)
      if phase == 'train':
        scheduler.step()
        if epoch_acc > best_train_acc:
          best_train_acc = epoch_acc
      # log("failed classes: "+str({star:sorted(running_incorrects[star].items(), key=lambda item: item[1], reverse=True) for star in running_incorrects}))
      log("failed classes: "+str(running_incorrects))
      log('{} Loss: {:.4f} Acc: {:.4f}'.format(
        phase, epoch_loss, epoch_acc))
      log('Accuracy: {}/{}'.format(running_corrects, len(trainData if phase == "train" else testData)))
      # deep copy the model
      if phase == "test" and epoch_acc > best_acc and test == False:
        best_acc = epoch_acc
        best_model_wts = copy.deepcopy(model.state_dict())
        save_model(model)
        log("updated model")
      time_elapsed = time.time() - start
      log('Epoch time stamp: {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    log("")
  log('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
  log('Best val Acc: {:4f}'.format(best_acc))
  # load best model weights
  model.load_state_dict(best_model_wts)
  return model
def save_model(model, name=modelName):
  torch.save(model.state_dict(), projectDir+name+".pth")
  log("saved model: "+name)
def handler(signum, frame):
  quit()
def log(message, name=logName):
  print(message)
  with open(projectDir+name, 'a') as file:
    file.write(message+"\n")
signal.signal(signal.SIGINT, handler)  # stop the run cleanly on Ctrl+C
if __name__ == "__main__":
  main()

Jan: What’s that doing? Looks messy.

Sebi: No, it is beautiful! OK. It takes the pre-built ResNet18 network from PyTorch. (ResNet networks are generally famous for their image processing abilities.) I changed it to have the correct number of inputs and outputs.

Jan: Like what?

Sebi: Inputs – greyscale images at a 64 x 64 pixel resolution; outputs – the number of training classes (20 stars).
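
In code, that adaptation is just the two layer swaps already in the script above – the first convolution switched to a single input channel and the final fully connected layer sized to the number of star classes:

# accept 1-channel (grayscale) images instead of the default 3-channel RGB
model.conv1 = nn.Conv2d(1, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
# one output score per star class (20 in our training set)
model.fc = nn.Linear(model.fc.in_features, len(trainData.classes))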

Jan: What next?

Sebi: Then it starts training that model through back-propagation and shows how it progresses. After a number of epochs it stops and saves the trained weights.
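
For illustration, here is a rough sketch (not part of the training script above) of how those saved weights could later be reloaded to classify a single image – identify_star is a hypothetical helper reusing projectDir, modelName, device, transforms and trainData from the script:

from PIL import Image

# hypothetical helper: reload the saved weights and classify one star image
def identify_star(imagePath):
  model.load_state_dict(torch.load(projectDir+modelName+".pth", map_location=device))
  model.eval()
  transform = transforms.Compose([transforms.Grayscale(num_output_channels=1), transforms.ToTensor()])
  image = transform(Image.open(imagePath)).unsqueeze(0).to(device)  # add a batch dimension
  with torch.no_grad():
    _, prediction = torch.max(model(image), 1)
  return trainData.classes[prediction.item()]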

Jan: What was the first result?

Sebi: On the first epoch of the first run, I ended up with an accuracy of about 0.01. It kept improving over 33 epochs and reached about 60% accuracy on the testing dataset.

Jan: That’s pretty cool for a first run. Would you share that log with us please?

Sebi:

# learning rate: 0.01, momentum: 0.0, step_size: 7, gamma: 0.1
saved model: resnet18
Epoch 1/50
----------
train Loss: 2.8978 Acc: 0.0972
Epoch time stamp: 22m 34s
test Loss: 2.7925 Acc: 0.1225
saved model: resnet18
updated model
Epoch time stamp: 23m 24s
Epoch 2/50
----------
train Loss: 2.6922 Acc: 0.1929
Epoch time stamp: 45m 48s
test Loss: 2.6295 Acc: 0.2250
saved model: resnet18
updated model
Epoch time stamp: 46m 32s

… many more lines.

Epoch 32/50
----------
train Loss: 2.0006 Acc: 0.6081
Epoch time stamp: 762m 41s
test Loss: 2.0135 Acc: 0.5955
Epoch time stamp: 763m 35s
Epoch 33/50
----------
train Loss: 2.0006 Acc: 0.6106
Epoch time stamp: 786m 42s
test Loss: 2.0042 Acc: 0.6065
saved model: resnet18
updated model
Epoch time stamp: 787m 36s

Jan: Ok, what happened next?

Sebi: I then played with a few training constants, as follows, on a smaller dataset.

saved model: resnet18
ended session
# learning rate: 0.1, momentum: 0.9, step_size: 5, gamma: 0.5
saved model: resnet18
Epoch 1/10
----------
train Loss: 0.4430 Acc: 0.8650
Epoch time stamp: 41m 11s
test Loss: 0.0202 Acc: 0.9960
saved model: resnet18
updated model
Epoch time stamp: 42m 34s

… making it clear that those new training constants have quite a significant impact on how it all behaves (much better now). As you can see below, it finished by achieving 100% accuracy.

saved model: resnet18
Epoch 1/3
----------
train Loss: 0.0003 Acc: 1.0000
Accuracy: 18000/18000
Epoch time stamp: 22m 11s
test Loss: 0.0001 Acc: 1.0000
Accuracy: 2000/2000
Epoch time stamp: 22m 52s
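
For reference, those constants plug into the same optimizer and scheduler lines of the script; a sketch using the values from the commented log line above:

# constants from the later runs (per the commented log line above)
optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
scheduler = lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.5)  # halve the learning rate every 5 epochs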

Jan: Amazing – almost unbelievable. Does that mean we can identify the 20 brightest stars with 100% accuracy now?

Sebi: Not exactly. When we started training on a much bigger dataset, it achieved only 99% accuracy on the training set and 95% on the testing set.

Jan: Why do you think it didn’t reach 100%?

Sebi: Because that dataset is corrupted … definitely.

Jan: I don’t believe you.

Sebi: Check this out.

Epoch 1/1
----------
failed classes: [('Arcturus', 877), ('Canopus', 157), ('Achernar', 0), ('Acrux', 0), ('Aldebaran', 0), ('Altair', 0), ('Antares', 0), ('Betelgeuse', 0), ('Capella', 0), ('Deneb', 0), ('Fomalhaut', 0), ('Hadar', 0), ('Mimosa', 0), ('Pollux', 0), ('Procyon', 0), ('Rigel', 0), ('Rigel Kentaurus', 0), ('Sirius', 0), ('Spica', 0), ('Vega', 0)]
test Loss: 0.1494 Acc: 0.9483
Accuracy: 18966/20000
Epoch time stamp: 9m 28s

Why do you think it would fail on just two stars?

Jan: That original dataset was generated programmatically, and I am pretty sure there is no problem with it.

Sebi: Fine, look at this:


Training complete in 0m 25s
Best val Acc: 0.000000
saved model: resnet18
the next epochs are for testing:
Epoch 1/1
----------
failed classes: {'Achernar': {'Achernar': 0, 'Acrux': 0, 'Aldebaran': 0, 'Altair': 0, 'Antares': 0, 'Arcturus': 0, 'Betelgeuse': 0, 'Canopus': 0, 'Capella': 0, 'Deneb': 0, 'Fomalhaut': 0, 'Hadar': 0, 'Mimosa': 0, 'Pollux': 0, 'Procyon': 0, 'Rigel': 0, 'Rigel Kentaurus': 0, 'Sirius': 0, 'Spica': 0, 'Vega': 0}, 'Acrux': {'Achernar': 0, 'Acrux': 0, 'Aldebaran': 0, 'Altair': 0, 'Antares': 0, 'Arcturus': 0, 'Betelgeuse': 0, 'Canopus': 0, 'Capella': 0, 'Deneb': 0, 'Fomalhaut': 0, 'Hadar': 0, 'Mimosa': 0, 'Pollux': 0, 'Procyon': 0, 'Rigel': 0, 'Rigel Kentaurus': 0, 'Sirius': 0, 'Spica': 0, 'Vega': 0}, 'Aldebaran': {'Achernar': 0, 'Acrux': 0, 'Aldebaran': 0, 'Altair': 0, 'Antares': 0, 'Arcturus': 0, 'Betelgeuse': 0, 'Canopus': 0, 'Capella': 0, 'Deneb': 0, 'Fomalhaut': 0, 'Hadar': 0, 'Mimosa': 0, 'Pollux': 0, 'Procyon': 0, 'Rigel': 0, 'Rigel Kentaurus': 0, 'Sirius': 0, 'Spica': 0, 'Vega': 0}, 'Altair': {'Achernar': 0, 'Acrux': 0, 'Aldebaran': 0, 'Altair': 0, 'Antares': 0, 'Arcturus': 0, 'Betelgeuse': 0, 'Canopus': 0, 'Capella': 0, 'Deneb': 0, 'Fomalhaut': 0, 'Hadar': 0, 'Mimosa': 0, 'Pollux': 0, 'Procyon': 0, 'Rigel': 0, 'Rigel Kentaurus': 0, 'Sirius': 0, 'Spica': 0, 'Vega': 0}, 'Antares': {'Achernar': 0, 'Acrux': 0, 'Aldebaran': 0, 'Altair': 0, 'Antares': 0, 'Arcturus': 0, 'Betelgeuse': 0, 'Canopus': 0, 'Capella': 0, 'Deneb': 0, 'Fomalhaut': 0, 'Hadar': 0, 'Mimosa': 0, 'Pollux': 0, 'Procyon': 0, 'Rigel': 0, 'Rigel Kentaurus': 0, 'Sirius': 0, 'Spica': 0, 'Vega': 0}, 'Arcturus': {'Achernar': 0, 'Acrux': 0, 'Aldebaran': 0, 'Altair': 0, 'Antares': 0, 'Arcturus': 0, 'Betelgeuse': 0, 'Canopus': 79, 'Capella': 798, 'Deneb': 0, 'Fomalhaut': 0, 'Hadar': 0, 'Mimosa': 0, 'Pollux': 0, 'Procyon': 0, 'Rigel': 0, 'Rigel Kentaurus': 0, 'Sirius': 0, 'Spica': 0, 'Vega': 0}, 'Betelgeuse': {'Achernar': 0, 'Acrux': 0, 'Aldebaran': 0, 'Altair': 0, 'Antares': 0, 'Arcturus': 0, 'Betelgeuse': 0, 'Canopus': 0, 'Capella': 0, 'Deneb': 0, 'Fomalhaut': 0, 'Hadar': 0, 'Mimosa': 0, 'Pollux': 0, 'Procyon': 0, 'Rigel': 0, 'Rigel Kentaurus': 0, 'Sirius': 0, 'Spica': 0, 'Vega': 0}, 'Canopus': {'Achernar': 0, 'Acrux': 0, 'Aldebaran': 0, 'Altair': 0, 'Antares': 0, 'Arcturus': 0, 'Betelgeuse': 0, 'Canopus': 0, 'Capella': 0, 'Deneb': 0, 'Fomalhaut': 0, 'Hadar': 0, 'Mimosa': 0, 'Pollux': 0, 'Procyon': 0, 'Rigel': 0, 'Rigel Kentaurus': 0, 'Sirius': 157, 'Spica': 0, 'Vega': 0}, 'Capella': {'Achernar': 0, 'Acrux': 0, 'Aldebaran': 0, 'Altair': 0, 'Antares': 0, 'Arcturus': 0, 'Betelgeuse': 0, 'Canopus': 0, 'Capella': 0, 'Deneb': 0, 'Fomalhaut': 0, 'Hadar': 0, 'Mimosa': 0, 'Pollux': 0, 'Procyon': 0, 'Rigel': 0, 'Rigel Kentaurus': 0, 'Sirius': 0, 'Spica': 0, 'Vega': 0}, 'Deneb': {'Achernar': 0, 'Acrux': 0, 'Aldebaran': 0, 'Altair': 0, 'Antares': 0, 'Arcturus': 0, 'Betelgeuse': 0, 'Canopus': 0, 'Capella': 0, 'Deneb': 0, 'Fomalhaut': 0, 'Hadar': 0, 'Mimosa': 0, 'Pollux': 0, 'Procyon': 0, 'Rigel': 0, 'Rigel Kentaurus': 0, 'Sirius': 0, 'Spica': 0, 'Vega': 0}, 'Fomalhaut': {'Achernar': 0, 'Acrux': 0, 'Aldebaran': 0, 'Altair': 0, 'Antares': 0, 'Arcturus': 0, 'Betelgeuse': 0, 'Canopus': 0, 'Capella': 0, 'Deneb': 0, 'Fomalhaut': 0, 'Hadar': 0, 'Mimosa': 0, 'Pollux': 0, 'Procyon': 0, 'Rigel': 0, 'Rigel Kentaurus': 0, 'Sirius': 0, 'Spica': 0, 'Vega': 0}, 'Hadar': {'Achernar': 0, 'Acrux': 0, 'Aldebaran': 0, 'Altair': 0, 'Antares': 0, 'Arcturus': 0, 'Betelgeuse': 0, 'Canopus': 0, 'Capella': 0, 'Deneb': 0, 'Fomalhaut': 0, 'Hadar': 0, 'Mimosa': 0, 'Pollux': 0, 'Procyon': 0, 'Rigel': 0, 'Rigel Kentaurus': 0, 'Sirius': 0, 'Spica': 0, 'Vega': 0}, 'Mimosa': 
{'Achernar': 0, 'Acrux': 0, 'Aldebaran': 0, 'Altair': 0, 'Antares': 0, 'Arcturus': 0, 'Betelgeuse': 0, 'Canopus': 0, 'Capella': 0, 'Deneb': 0, 'Fomalhaut': 0, 'Hadar': 0, 'Mimosa': 0, 'Pollux': 0, 'Procyon': 0, 'Rigel': 0, 'Rigel Kentaurus': 0, 'Sirius': 0, 'Spica': 0, 'Vega': 0}, 'Pollux': {'Achernar': 0, 'Acrux': 0, 'Aldebaran': 0, 'Altair': 0, 'Antares': 0, 'Arcturus': 0, 'Betelgeuse': 0, 'Canopus': 0, 'Capella': 0, 'Deneb': 0, 'Fomalhaut': 0, 'Hadar': 0, 'Mimosa': 0, 'Pollux': 0, 'Procyon': 0, 'Rigel': 0, 'Rigel Kentaurus': 0, 'Sirius': 0, 'Spica': 0, 'Vega': 0}, 'Procyon': {'Achernar': 0, 'Acrux': 0, 'Aldebaran': 0, 'Altair': 0, 'Antares': 0, 'Arcturus': 0, 'Betelgeuse': 0, 'Canopus': 0, 'Capella': 0, 'Deneb': 0, 'Fomalhaut': 0, 'Hadar': 0, 'Mimosa': 0, 'Pollux': 0, 'Procyon': 0, 'Rigel': 0, 'Rigel Kentaurus': 0, 'Sirius': 0, 'Spica': 0, 'Vega': 0}, 'Rigel': {'Achernar': 0, 'Acrux': 0, 'Aldebaran': 0, 'Altair': 0, 'Antares': 0, 'Arcturus': 0, 'Betelgeuse': 0, 'Canopus': 0, 'Capella': 0, 'Deneb': 0, 'Fomalhaut': 0, 'Hadar': 0, 'Mimosa': 0, 'Pollux': 0, 'Procyon': 0, 'Rigel': 0, 'Rigel Kentaurus': 0, 'Sirius': 0, 'Spica': 0, 'Vega': 0}, 'Rigel Kentaurus': {'Achernar': 0, 'Acrux': 0, 'Aldebaran': 0, 'Altair': 0, 'Antares': 0, 'Arcturus': 0, 'Betelgeuse': 0, 'Canopus': 0, 'Capella': 0, 'Deneb': 0, 'Fomalhaut': 0, 'Hadar': 0, 'Mimosa': 0, 'Pollux': 0, 'Procyon': 0, 'Rigel': 0, 'Rigel Kentaurus': 0, 'Sirius': 0, 'Spica': 0, 'Vega': 0}, 'Sirius': {'Achernar': 0, 'Acrux': 0, 'Aldebaran': 0, 'Altair': 0, 'Antares': 0, 'Arcturus': 0, 'Betelgeuse': 0, 'Canopus': 0, 'Capella': 0, 'Deneb': 0, 'Fomalhaut': 0, 'Hadar': 0, 'Mimosa': 0, 'Pollux': 0, 'Procyon': 0, 'Rigel': 0, 'Rigel Kentaurus': 0, 'Sirius': 0, 'Spica': 0, 'Vega': 0}, 'Spica': {'Achernar': 0, 'Acrux': 0, 'Aldebaran': 0, 'Altair': 0, 'Antares': 0, 'Arcturus': 0, 'Betelgeuse': 0, 'Canopus': 0, 'Capella': 0, 'Deneb': 0, 'Fomalhaut': 0, 'Hadar': 0, 'Mimosa': 0, 'Pollux': 0, 'Procyon': 0, 'Rigel': 0, 'Rigel Kentaurus': 0, 'Sirius': 0, 'Spica': 0, 'Vega': 0}, 'Vega': {'Achernar': 0, 'Acrux': 0, 'Aldebaran': 0, 'Altair': 0, 'Antares': 0, 'Arcturus': 0, 'Betelgeuse': 0, 'Canopus': 0, 'Capella': 0, 'Deneb': 0, 'Fomalhaut': 0, 'Hadar': 0, 'Mimosa': 0, 'Pollux': 0, 'Procyon': 0, 'Rigel': 0, 'Rigel Kentaurus': 0, 'Sirius': 0, 'Spica': 0, 'Vega': 0}}
test Loss: 0.1494 Acc: 0.9483
Accuracy: 18966/20000
Epoch time stamp: 9m 55s

… the identification results for just four stars are strangely incorrect. See the table below.
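
The per-star totals shown earlier can be produced from the running_incorrects dictionary with a small helper along these lines (a hypothetical sketch, not part of the training script):

# hypothetical helper: total misclassifications per true star, most confused first
def summarize_failures(running_incorrects):
  totals = {star: sum(confused.values()) for star, confused in running_incorrects.items()}
  return sorted(totals.items(), key=lambda item: item[1], reverse=True)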

Jan: Well, it is a neural network; it is OK to get a few wrong. I actually think this is an awesome result! Thank you, Sebi!

Sebi: FYI, I also tried running it with Resnet30 and …

saved model: resnet30
Epoch 1/1
----------
....
train Loss: 0.0008 Acc: 1.0000
Accuracy: 199995/200000
Epoch time stamp: 549m 7s
failed classes: {"Achernar": {"Achernar": 0, "Acrux": 0, "Aldebaran": 0, "Altair": 0, "Antares": 0, "Arcturus": 0, "Betelgeuse": 0, "Canopus": 0, "Capella": 0, "Deneb": 0, "Fomalhaut": 0, "Hadar": 0, "Mimosa": 0, "Pollux": 0, "Procyon": 0, "Rigel": 0, "Rigel Kentaurus": 0, "Sirius": 0, "Spica": 0, "Vega": 0}, "Acrux": {"Achernar": 0, "Acrux": 0, "Aldebaran": 0, "Altair": 0, "Antares": 0, "Arcturus": 0, "Betelgeuse": 0, "Canopus": 0, "Capella": 0, "Deneb": 0, "Fomalhaut": 0, "Hadar": 0, "Mimosa": 0, "Pollux": 0, "Procyon": 0, "Rigel": 0, "Rigel Kentaurus": 0, "Sirius": 0, "Spica": 0, "Vega": 0}, "Aldebaran": {"Achernar": 0, "Acrux": 0, "Aldebaran": 0, "Altair": 0, "Antares": 0, "Arcturus": 0, "Betelgeuse": 0, "Canopus": 0, "Capella": 0, "Deneb": 0, "Fomalhaut": 0, "Hadar": 0, "Mimosa": 1, "Pollux": 0, "Procyon": 0, "Rigel": 0, "Rigel Kentaurus": 0, "Sirius": 0, "Spica": 0, "Vega": 0}, "Altair": {"Achernar": 0, "Acrux": 0, "Aldebaran": 0, "Altair": 0, "Antares": 0, "Arcturus": 0, "Betelgeuse": 0, "Canopus": 0, "Capella": 0, "Deneb": 0, "Fomalhaut": 0, "Hadar": 0, "Mimosa": 0, "Pollux": 0, "Procyon": 0, "Rigel": 0, "Rigel Kentaurus": 0, "Sirius": 0, "Spica": 0, "Vega": 0}, "Antares": {"Achernar": 0, "Acrux": 0, "Aldebaran": 0, "Altair": 0, "Antares": 0, "Arcturus": 0, "Betelgeuse": 0, "Canopus": 0, "Capella": 0, "Deneb": 0, "Fomalhaut": 0, "Hadar": 0, "Mimosa": 0, "Pollux": 0, "Procyon": 0, "Rigel": 0, "Rigel Kentaurus": 0, "Sirius": 0, "Spica": 0, "Vega": 0}, "Arcturus": {"Achernar": 0, "Acrux": 0, "Aldebaran": 0, "Altair": 0, "Antares": 0, "Arcturus": 0, "Betelgeuse": 0, "Canopus": 0, "Capella": 271, "Deneb": 0, "Fomalhaut": 0, "Hadar": 0, "Mimosa": 0, "Pollux": 0, "Procyon": 0, "Rigel": 0, "Rigel Kentaurus": 0, "Sirius": 0, "Spica": 0, "Vega": 0}, "Betelgeuse": {"Achernar": 0, "Acrux": 0, "Aldebaran": 0, "Altair": 0, "Antares": 0, "Arcturus": 0, "Betelgeuse": 0, "Canopus": 0, "Capella": 0, "Deneb": 0, "Fomalhaut": 0, "Hadar": 0, "Mimosa": 0, "Pollux": 0, "Procyon": 0, "Rigel": 0, "Rigel Kentaurus": 0, "Sirius": 0, "Spica": 0, "Vega": 0}, "Canopus": {"Achernar": 0, "Acrux": 0, "Aldebaran": 0, "Altair": 2, "Antares": 0, "Arcturus": 0, "Betelgeuse": 0, "Canopus": 0, "Capella": 0, "Deneb": 0, "Fomalhaut": 840, "Hadar": 0, "Mimosa": 0, "Pollux": 1, "Procyon": 0, "Rigel": 8, "Rigel Kentaurus": 0, "Sirius": 92, "Spica": 0, "Vega": 57}, "Capella": {"Achernar": 0, "Acrux": 0, "Aldebaran": 0, "Altair": 0, "Antares": 0, "Arcturus": 0, "Betelgeuse": 0, "Canopus": 0, "Capella": 0, "Deneb": 0, "Fomalhaut": 0, "Hadar": 0, "Mimosa": 0, "Pollux": 0, "Procyon": 0, "Rigel": 0, "Rigel Kentaurus": 0, "Sirius": 0, "Spica": 0, "Vega": 0}, "Deneb": {"Achernar": 0, "Acrux": 0, "Aldebaran": 0, "Altair": 0, "Antares": 0, "Arcturus": 0, "Betelgeuse": 0, "Canopus": 0, "Capella": 0, "Deneb": 0, "Fomalhaut": 0, "Hadar": 0, "Mimosa": 0, "Pollux": 0, "Procyon": 0, "Rigel": 0, "Rigel Kentaurus": 0, "Sirius": 0, "Spica": 0, "Vega": 0}, "Fomalhaut": {"Achernar": 0, "Acrux": 0, "Aldebaran": 0, "Altair": 0, "Antares": 0, "Arcturus": 0, "Betelgeuse": 0, "Canopus": 0, "Capella": 0, "Deneb": 0, "Fomalhaut": 0, "Hadar": 0, "Mimosa": 0, "Pollux": 0, "Procyon": 0, "Rigel": 0, "Rigel Kentaurus": 0, "Sirius": 0, "Spica": 0, "Vega": 0}, "Hadar": {"Achernar": 0, "Acrux": 0, "Aldebaran": 0, "Altair": 0, "Antares": 0, "Arcturus": 0, "Betelgeuse": 0, "Canopus": 0, "Capella": 0, "Deneb": 0, "Fomalhaut": 0, "Hadar": 0, "Mimosa": 0, "Pollux": 0, "Procyon": 0, "Rigel": 0, "Rigel Kentaurus": 1, "Sirius": 0, "Spica": 0, "Vega": 0}, "Mimosa": 
{"Achernar": 0, "Acrux": 0, "Aldebaran": 0, "Altair": 0, "Antares": 0, "Arcturus": 0, "Betelgeuse": 0, "Canopus": 0, "Capella": 0, "Deneb": 0, "Fomalhaut": 0, "Hadar": 0, "Mimosa": 0, "Pollux": 0, "Procyon": 0, "Rigel": 0, "Rigel Kentaurus": 0, "Sirius": 0, "Spica": 0, "Vega": 0}, "Pollux": {"Achernar": 0, "Acrux": 0, "Aldebaran": 0, "Altair": 1, "Antares": 0, "Arcturus": 0, "Betelgeuse": 0, "Canopus": 0, "Capella": 0, "Deneb": 0, "Fomalhaut": 0, "Hadar": 0, "Mimosa": 0, "Pollux": 0, "Procyon": 0, "Rigel": 0, "Rigel Kentaurus": 0, "Sirius": 0, "Spica": 0, "Vega": 0}, "Procyon": {"Achernar": 0, "Acrux": 0, "Aldebaran": 0, "Altair": 0, "Antares": 0, "Arcturus": 0, "Betelgeuse": 0, "Canopus": 0, "Capella": 2, "Deneb": 0, "Fomalhaut": 0, "Hadar": 0, "Mimosa": 0, "Pollux": 0, "Procyon": 0, "Rigel": 0, "Rigel Kentaurus": 0, "Sirius": 0, "Spica": 0, "Vega": 0}, "Rigel": {"Achernar": 0, "Acrux": 0, "Aldebaran": 0, "Altair": 0, "Antares": 0, "Arcturus": 0, "Betelgeuse": 0, "Canopus": 0, "Capella": 0, "Deneb": 0, "Fomalhaut": 0, "Hadar": 0, "Mimosa": 0, "Pollux": 0, "Procyon": 0, "Rigel": 0, "Rigel Kentaurus": 0, "Sirius": 0, "Spica": 0, "Vega": 0}, "Rigel Kentaurus": {"Achernar": 0, "Acrux": 0, "Aldebaran": 0, "Altair": 0, "Antares": 0, "Arcturus": 0, "Betelgeuse": 0, "Canopus": 0, "Capella": 0, "Deneb": 0, "Fomalhaut": 0, "Hadar": 0, "Mimosa": 0, "Pollux": 0, "Procyon": 0, "Rigel": 0, "Rigel Kentaurus": 0, "Sirius": 0, "Spica": 0, "Vega": 0}, "Sirius": {"Achernar": 0, "Acrux": 0, "Aldebaran": 0, "Altair": 0, "Antares": 0, "Arcturus": 0, "Betelgeuse": 0, "Canopus": 0, "Capella": 0, "Deneb": 0, "Fomalhaut": 0, "Hadar": 0, "Mimosa": 0, "Pollux": 0, "Procyon": 0, "Rigel": 0, "Rigel Kentaurus": 0, "Sirius": 0, "Spica": 0, "Vega": 0}, "Spica": {"Achernar": 0, "Acrux": 0, "Aldebaran": 0, "Altair": 0, "Antares": 0, "Arcturus": 0, "Betelgeuse": 0, "Canopus": 0, "Capella": 0, "Deneb": 0, "Fomalhaut": 0, "Hadar": 0, "Mimosa": 0, "Pollux": 0, "Procyon": 0, "Rigel": 0, "Rigel Kentaurus": 0, "Sirius": 0, "Spica": 0, "Vega": 0}, "Vega": {"Achernar": 0, "Acrux": 0, "Aldebaran": 0, "Altair": 0, "Antares": 0, "Arcturus": 0, "Betelgeuse": 0, "Canopus": 0, "Capella": 0, "Deneb": 0, "Fomalhaut": 0, "Hadar": 0, "Mimosa": 0, "Pollux": 0, "Procyon": 0, "Rigel": 0, "Rigel Kentaurus": 0, "Sirius": 0, "Spica": 0, "Vega": 0}}
test Loss: 0.7076 Acc: 0.9365
Accuracy: 18729/20000
saved model: resnet30
updated model
Epoch time stamp: 566m 14s
Training complete in 566m 14s
Best val Acc: 0.936450
saved model: resnet30
Epoch 1/1
----------

… here the accuracy got even worse, reaching just 94%. I find it interesting that the invalid results from Resnet18 do not correlate with those from Resnet30. It just doesn’t make sense …

Jan: Well, to me both results look impressive. Thank you for answering all my questions!

With this stage of our project covered, what lies ahead of us next? We need to finish the whole loop, which I currently imagine like this:

  1. Stellarium randomly picks a place on Earth at a certain date and time.
  2. Stellarium takes pictures of the 4 brightest stars, saves them locally, and records their inclinations.
  3. The pre-trained AI attempts to identify all 4 stars and potentially runs some basic checks on whether they form a plausible scenario (can be seen from Earth at some point / above the horizon) – see the sketch after this list.
  4. In the next stage, the application uses the inclinations of the identified stars and looks up star charts for the specified date to identify the celestial position.
  5. Finally, it translates the celestial position into GPS coordinates.
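
For step 3, a very rough sketch of what that identification stage could look like – all names here (identify_captured_stars, and the identify_star helper sketched earlier) are hypothetical placeholders, not existing code:

# hypothetical sketch of step 3: identify each of the 4 captured star images
# (identify_star is the hypothetical helper sketched earlier in this post)
def identify_captured_stars(imagePaths):
  identified = [identify_star(path) for path in imagePaths]
  # very basic plausibility check: the 4 identifications should at least be distinct stars
  if len(set(identified)) != len(identified):
    raise ValueError("implausible result: duplicate star identifications")
  return identified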

However, this needs to wait for another day. 🙂
