A detailed look at conda and Torch from multiple angles

I. Installation and Running

1. conda is an open-source package and environment manager that can be used to install and run all kinds of software packages. Once conda is installed, PyTorch can be installed with the conda install command.

conda install pytorch torchvision torchaudio -c pytorch
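
If an NVIDIA GPU is available, a CUDA-enabled build can be installed instead. The exact packages and channels depend on the PyTorch release and your CUDA driver; the following is one example taken from the install selector on pytorch.org (adjust pytorch-cuda to match your setup):

conda install pytorch torchvision torchaudio pytorch-cuda=11.8 -c pytorch -c nvidia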

2. You can also create a virtual environment with conda and install PyTorch inside it, which avoids conflicts between multiple PyTorch versions on the same system.

conda create -n myenv python=3.8
conda activate myenv
conda install pytorch torchvision torchaudio -c pytorch
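
Once the environment is active, a quick sanity check confirms the installation and reports whether a GPU build is usable:

import torch
print(torch.__version__)           # installed PyTorch version
print(torch.cuda.is_available())   # True only if a CUDA build and a working driver are present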

II. Basic Torch Operations

1. The Tensor is the core data structure in Torch and can be thought of as a multi-dimensional array. Tensors can be created filled with zeros, filled with random values, or from explicitly given values.

# Create a 2x3 tensor of zeros
import torch
x = torch.zeros(2, 3)
print(x)

# Create a 2x3 tensor of random values drawn from a standard normal distribution
x = torch.randn(2, 3)
print(x)

# Create a 2x3 tensor from explicitly given values
x = torch.tensor([[1, 2, 3], [4, 5, 6]])
print(x)
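
Beyond creation, every tensor also carries a shape, a dtype, and a device, and CPU tensors convert to and from NumPy arrays; a short sketch of these attributes:

import numpy as np

x = torch.tensor([[1, 2, 3], [4, 5, 6]])
print(x.shape)    # torch.Size([2, 3])
print(x.dtype)    # torch.int64
print(x.device)   # cpu, unless the tensor is moved with .to("cuda")

# CPU tensors and NumPy arrays can be converted in both directions
a = x.numpy()
y = torch.from_numpy(np.ones((2, 3)))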

2. Tensors support elementwise arithmetic such as addition, subtraction, multiplication, and division, as well as matrix multiplication.

x = torch.randn(2, 3)
y = torch.randn(2, 3)
# Elementwise addition
z = x + y
# Elementwise subtraction
z = x - y
# Elementwise multiplication
z = x * y
# Elementwise division
z = x / y
# Matrix multiplication
x = torch.rand(2, 3)
y = torch.rand(3, 2)
z = torch.mm(x, y)  # z has shape (2, 2)
print(z)
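
These operations also feed Torch's automatic differentiation: creating a tensor with requires_grad=True records the computation graph, and backward() fills in gradients, which is exactly what the training loops later in this article rely on. A minimal sketch:

x = torch.randn(2, 3, requires_grad=True)
y = (x * x).sum()   # a scalar built from differentiable operations
y.backward()        # compute dy/dx
print(x.grad)       # equals 2 * x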

III. Deep Learning with Torch

1. Torch's neural network package, nn, helps build neural network models. The built-in modules in nn can be used to implement convolutional neural networks (CNNs), long short-term memory networks (LSTMs), and other architectures.

import torch
import torch.nn as nn
import torch.nn.functional as F

# Build a simple fully connected model (MLP)
class MLP(nn.Module):
    def __init__(self):
        super(MLP, self).__init__()
        self.fc1 = nn.Linear(784, 256)
        self.fc2 = nn.Linear(256, 128)
        self.fc3 = nn.Linear(128, 10)
    def forward(self, x):
        x = x.view(x.size(0), -1)   # flatten each image to a 784-dimensional vector
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

# Build a CNN model for 28x28 single-channel images such as MNIST
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=5, padding=2)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=5, padding=2)
        self.fc1 = nn.Linear(64*7*7, 1024)
        self.fc2 = nn.Linear(1024, 10)
    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.max_pool2d(x, 2)      # 28x28 -> 14x14
        x = F.relu(self.conv2(x))
        x = F.max_pool2d(x, 2)      # 14x14 -> 7x7
        x = x.view(x.size(0), -1)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x

# Build an LSTM model over sequences of 256-dimensional features
class LSTM(nn.Module):
    def __init__(self):
        super(LSTM, self).__init__()
        self.lstm = nn.LSTM(256, 512, num_layers=3, batch_first=True)
        self.fc = nn.Linear(512, 10)
    def forward(self, x):
        # Initial hidden and cell states on the same device as the input
        h0 = torch.zeros(3, x.size(0), 512, device=x.device)
        c0 = torch.zeros(3, x.size(0), 512, device=x.device)
        out, _ = self.lstm(x, (h0, c0))
        # Classify from the last time step; return raw logits instead if training with CrossEntropyLoss
        out = F.softmax(self.fc(out[:, -1, :]), dim=1)
        return out
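
As a quick check of the definitions above, the models can be run on dummy batches to confirm their output shapes (a sketch; the input sizes are chosen to match the layer definitions above):

model = Net()                              # the CNN defined above expects 1x28x28 images
dummy_images = torch.randn(4, 1, 28, 28)   # a batch of 4 MNIST-sized images
print(model(dummy_images).shape)           # torch.Size([4, 10])

lstm_model = LSTM()
dummy_seq = torch.randn(4, 16, 256)        # 4 sequences of length 16 with 256 features each
print(lstm_model(dummy_seq).shape)         # torch.Size([4, 10])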

2. Torch can be used for many kinds of deep-learning tasks, such as image classification, object detection, and natural language processing.

import torch.optim as optim
import torchvision.datasets as dsets
import torchvision.transforms as transforms

# Train the CNN model defined above on the MNIST dataset
# Hyperparameters (example values)
batch_size = 64
learning_rate = 0.01
num_epochs = 5

# Download MNIST and convert the images to tensors
train_dataset = dsets.MNIST(root='./data', train=True, transform=transforms.ToTensor(), download=True)
test_dataset = dsets.MNIST(root='./data', train=False, transform=transforms.ToTensor(), download=True)

# Wrap the datasets in DataLoaders that yield mini-batches
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)

# Initialize the model, loss function, and optimizer
net = Net()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9)

# Training loop
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        optimizer.zero_grad()
        outputs = net(images)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
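
After training, the test_loader defined above can be used to estimate accuracy on the held-out set; a minimal evaluation sketch:

net.eval()                      # switch layers such as dropout/batchnorm to eval mode
correct, total = 0, 0
with torch.no_grad():           # gradients are not needed for evaluation
    for images, labels in test_loader:
        outputs = net(images)
        predicted = outputs.argmax(dim=1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print('Test accuracy: {:.2f}%'.format(100.0 * correct / total))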

# Train a ResNet model on the CIFAR-10 dataset
import torchvision
from torchvision import models

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Standard CIFAR-10 augmentation and normalization (example values)
transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2470, 0.2435, 0.2616)),
])
transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2470, 0.2435, 0.2616)),
])

# Download CIFAR-10 and wrap it in DataLoaders
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=2)

# Set up a ResNet18 with a 10-class output layer
# (newer torchvision versions use weights=None instead of pretrained=False)
net = models.resnet18(pretrained=False)
num_ftrs = net.fc.in_features
net.fc = nn.Linear(num_ftrs, 10)
net.to(device)

# Loss, optimizer, and a cosine learning-rate schedule
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=200)

num_epochs = 200   # example value matching T_max above
for epoch in range(num_epochs):
    net.train()
    train_loss = 0
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        inputs, targets = inputs.to(device), targets.to(device)
        optimizer.zero_grad()
        outputs = net(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()
        train_loss += loss.item()
    scheduler.step()   # advance the schedule once per epoch
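
Once training finishes, the learned weights can be saved and restored later with the standard state_dict workflow (a sketch; the file name is arbitrary):

torch.save(net.state_dict(), 'resnet18_cifar10.pth')   # save the parameters only

# Later: rebuild the same architecture and load the weights back
restored = models.resnet18(pretrained=False)
restored.fc = nn.Linear(restored.fc.in_features, 10)
restored.load_state_dict(torch.load('resnet18_cifar10.pth', map_location=device))
restored.to(device).eval()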

IV. Application Scenarios for Torch

1. Deep-learning research and development

import torch.nn as nn
import torch.optim as optim
import torchvision.datasets as dsets
import torchvision.transforms as transforms

# Load the MNIST dataset
train_dataset = dsets.MNIST(root='./data', train=True, transform=transforms.ToTensor(), download=True)
test_dataset = dsets.MNIST(root='./data', train=False, transform=transforms.ToTensor(), download=True)
# Wrap the training set in a DataLoader (batch size of 64 as an example)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=64, shuffle=True)

# Initialize the model, loss function, and optimizer
net = Net()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

# Training loop
for epoch in range(2):
    for i, (images, labels) in enumerate(train_loader):
        optimizer.zero_grad()
        outputs = net(images)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

# Distributed training with Torch
import os
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel as DDP

def train(model, device, train_loader, criterion, optimizer, epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()

def setup(rank, world_size):
    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_PORT'] = '12345'
    # Initialize the process group (gloo works on CPU; nccl is preferred for multi-GPU)
    dist.init_process_group("gloo", rank=rank, world_size=world_size)

def cleanup():
    dist.destroy_process_group()

if __name__ == '__main__':
    # Single-process example: rank 0 in a world of size 1.
    # A real multi-process run starts one worker per process so that each
    # worker receives its own rank (see the mp.spawn sketch below).
    rank, world_size = 0, 1
    setup(rank, world_size)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = Net().to(device)
    # Wrap the model so gradients are synchronized across processes
    model = DDP(model, device_ids=[0] if device.type == "cuda" else None)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
    # Reuse the MNIST train_dataset loaded above
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=64, shuffle=True, num_workers=0, pin_memory=True)
    for epoch in range(2):
        train(model, device, train_loader, criterion, optimizer, epoch)
    cleanup()
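
For an actual multi-process run, each worker needs its own rank and its own shard of the data. The sketch below assumes the setup, cleanup, train, Net, and MNIST train_dataset defined above, and combines mp.spawn with a DistributedSampler:

from torch.utils.data.distributed import DistributedSampler

def worker(rank, world_size):
    # mp.spawn passes each process its rank as the first argument
    setup(rank, world_size)
    model = DDP(Net())   # CPU/gloo here; pass device_ids=[rank] with one GPU per process
    optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
    sampler = DistributedSampler(train_dataset, num_replicas=world_size, rank=rank)
    loader = torch.utils.data.DataLoader(train_dataset, batch_size=64, sampler=sampler)
    train(model, torch.device("cpu"), loader, nn.CrossEntropyLoss(), optimizer, 0)
    cleanup()

# Launch world_size worker processes:
# mp.spawn(worker, args=(world_size,), nprocs=world_size, join=True)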

2. Implementing machine-learning algorithms

import torch
import pandas as pd
from torch.utils.data import Dataset, DataLoader

# House-price prediction with a linear regression model
class HousingDataset(Dataset):
    def __init__(self):
        # Read the dataset with pandas and convert it to tensors
        train_data = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/housing/housing.data', header=None, sep=r'\s+').values
        self.len = train_data.shape[0]
        self.x_data = torch.from_numpy(train_data[:, :-1]).float()               # 13 input features
        self.y_data = torch.from_numpy(train_data[:, -1].reshape(-1, 1)).float() # target price
    def __getitem__(self, index):
        return self.x_data[index], self.y_data[index]
    def __len__(self):
        return self.len

dataset = HousingDataset()
train_loader = DataLoader(dataset=dataset, batch_size=32, shuffle=True)

class LinearRegression(torch.nn.Module):
    def __init__(self):
        super(LinearRegression, self).__init__()
        self.linear = torch.nn.Linear(13, 1)
    def forward(self, x):
        y_pred = self.linear(x)
        return y_pred

model = LinearRegression()
criterion = torch.nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
epochs = 1000
for epoch in range(epochs):
    for x, y in train_loader:
        y_pred = model(x)
        loss = criterion(y_pred, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    if epoch % 100 == 0:
        print('epoch {}, loss {}'.format(epoch, loss.item()))
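
After training, the model can be used for prediction in evaluation mode; a minimal sketch that reuses the first sample from the dataset:

model.eval()
with torch.no_grad():
    sample_x, sample_y = dataset[0]
    prediction = model(sample_x.unsqueeze(0))   # add a batch dimension
    print('predicted: {:.2f}, actual: {:.2f}'.format(prediction.item(), sample_y.item()))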

V. Summary

This article has covered conda and Torch in detail across several areas: installation and running, basic Torch operations, deep learning with Torch, and Torch's application scenarios. With the material above, you should be able to use Torch comfortably for deep learning, for implementing machine-learning algorithms, and for a range of related tasks.

Original article by DJWZX. If you repost it, please credit the source: https://www.506064.com/zh-tw/n/334486.html
