# Imports needed by the snippet below (assumes PyTorch is installed)
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset

# Run on a GPU when one is available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load data (placeholders: supply your tokenized corpus and vocabulary here)
text_data = [...]
vocab = {...}
# Define a simple language model: embedding -> RNN -> linear output layer
class LanguageModel(nn.Module):
    def __init__(self, vocab_size, embedding_dim, hidden_dim, output_dim):
        super(LanguageModel, self).__init__()
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        self.rnn = nn.RNN(embedding_dim, hidden_dim, batch_first=True)
        self.fc = nn.Linear(hidden_dim, output_dim)
    def forward(self, x):
        embedded = self.embedding(x)        # (batch, seq_len, embedding_dim)
        output, _ = self.rnn(embedded)      # (batch, seq_len, hidden_dim)
        output = self.fc(output[:, -1, :])  # logits from the last time step
        return output
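# LanguageModelDataset is used in main() but was not defined in the original
# snippet. A minimal sketch, assuming text_data is a list of token-ID
# sequences and the task is next-token prediction; the window length seq_len
# and the flattening scheme are assumptions, not the author's implementation.
class LanguageModelDataset(Dataset):
    def __init__(self, text_data, vocab, seq_len=32):
        self.vocab = vocab      # kept for completeness; unused in this sketch
        self.seq_len = seq_len
        # Flatten the corpus into one long stream of token IDs
        self.tokens = [tok for seq in text_data for tok in seq]

    def __len__(self):
        # One example per window of seq_len inputs followed by a target token
        return max(0, len(self.tokens) - self.seq_len)

    def __getitem__(self, idx):
        x = torch.tensor(self.tokens[idx:idx + self.seq_len], dtype=torch.long)
        y = torch.tensor(self.tokens[idx + self.seq_len], dtype=torch.long)
        return x, y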
# Main function
def main():
    # Set hyperparameters
    vocab_size = 10000
    embedding_dim = 128
    hidden_dim = 256
    output_dim = vocab_size
    batch_size = 32
    epochs = 10
    # Create dataset and data loader
    dataset = LanguageModelDataset(text_data, vocab)
    loader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
    # Create model, optimizer, and criterion
    model = LanguageModel(vocab_size, embedding_dim, hidden_dim, output_dim).to(device)
    optimizer = optim.Adam(model.parameters(), lr=0.001)
    criterion = nn.CrossEntropyLoss()
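    # The original snippet stops after building the model; the loop below is a
    # standard next-token training loop, sketched as an assumption about how
    # the pieces above are meant to be wired together.
    model.train()
    for epoch in range(epochs):
        total_loss = 0.0
        for inputs, targets in loader:
            inputs, targets = inputs.to(device), targets.to(device)
            optimizer.zero_grad()
            logits = model(inputs)             # (batch, vocab_size)
            loss = criterion(logits, targets)  # targets: (batch,) token IDs
            loss.backward()
            optimizer.step()
            total_loss += loss.item()
        print(f"epoch {epoch + 1}/{epochs}, loss {total_loss / len(loader):.4f}")


if __name__ == "__main__":
    main()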