# GPL 2.0
# In[ ]:
# Discord bot token (placeholder — substitute a real token before running).
# Fixes: the original L1 was a bare `GPL 2.0` expression and the assignment
# below had an unbalanced quote (`= bottoken'`), both SyntaxErrors.
discord_token = 'bottoken'  #@param {type:"string"}
# ##Module installation
# this will install all the necessary modules
# In[ ]:
# ##Download and load GPT NEO model.
# It will take a little bit
# In[3]:
# Download (on first run) and instantiate GPT-Neo 1.3B plus its GPT-2 BPE tokenizer.
from transformers import GPTNeoForCausalLM, GPT2Tokenizer
model = GPTNeoForCausalLM.from_pretrained('EleutherAI/gpt-neo-1.3B')
tokenizer = GPT2Tokenizer.from_pretrained('EleutherAI/gpt-neo-1.3B')
# add padding token to tokenizer
tokenizer.add_special_tokens({'pad_token': '[PAD]'})
# set padding token id to the id of the padding token
model.config.pad_token_id = tokenizer.pad_token_id
# NOTE(review): add_special_tokens grows the tokenizer vocabulary, but the model's
# embedding matrix is never resized (model.resize_token_embeddings) — confirm the
# [PAD] id stays within the embedding range.
# Move all weights to the GPU; this raises if CUDA is unavailable.
model.cuda()
# In[4]:
# Conversation-mode presets; only the 'chat' preset exists for now.
modes = {
    'chat': {
        'prompt': 'model\n\n',
        'partner': 'Partner: ',
        'ai': 'Humoid: ',
        'end': '\n',
    },
}
# Active preset used when assembling prompts.
mode = modes['chat']
# In[5]:
# ##Discord bot
# In[1]:
import discord
from discord.ext import commands
import nest_asyncio
import asyncio
import time
import os
# Gateway intents with typing/presence events disabled — the bot never reads them.
intents = discord.Intents.default()
intents.typing = False
intents.presences = False
# Prefix-command bot: commands are invoked as "!<command>".
bot = commands.Bot(command_prefix='!', intents=intents)
class TridequeEntry:
    """A 13x13 grid of string cells with (x, y) accessors."""

    def __init__(self, matrix=None):
        # Use the caller's matrix when given; otherwise start with an
        # empty 13x13 grid of empty strings.
        self.matrix = matrix if matrix is not None else [
            ["" for _ in range(13)] for _ in range(13)
        ]

    def update_entry(self, x, y, value):
        """Store value at row x, column y."""
        self.matrix[x][y] = value

    def get_value(self, x, y):
        """Return the value at row x, column y."""
        return self.matrix[x][y]
# Module-level grid instance (not referenced by the bot commands below).
trideque_entry = TridequeEntry()
def generate_chunks(prompt, chunk_size=1500):
    """Split prompt into space-joined chunks of at most chunk_size words."""
    words = prompt.split()
    chunks = []
    for start in range(0, len(words), chunk_size):
        chunks.append(' '.join(words[start:start + chunk_size]))
    return chunks
async def gpt3_generate(chunk, max_length=845, time_limit=19.0):
    """Generate a GPT-Neo continuation for chunk.

    Returns (decoded_text, elapsed_seconds).

    NOTE(review): model.generate is a blocking call executed directly inside
    this coroutine, so it stalls the event loop for up to time_limit seconds —
    consider offloading to a thread.
    """
    start_time = time.time()
    # Truncate the prompt to 512 tokens before generation.
    inputs = tokenizer.encode(chunk, return_tensors='pt', truncation=True, max_length=512)
    inputs = inputs.cuda()
    # Mask out pad-token positions; encode() does no padding here, so this is
    # presumably all ones — confirm.
    attention_mask = inputs.ne(tokenizer.pad_token_id).float()
    outputs = model.generate(inputs, max_length=max_length, do_sample=True, max_time=time_limit, attention_mask=attention_mask)
    end_time = time.time()
    return tokenizer.decode(outputs[0]), end_time - start_time
async def write_to_buffer_file(response_text):
    """Write response_text to a scratch file and return its path.

    Fix: open with an explicit UTF-8 encoding so model output containing
    non-ASCII characters does not crash on platforms with a legacy default
    encoding — this matches the second write_to_buffer_file definition in
    this file, which already passes encoding='utf-8'.
    """
    buffer_file = "buffer.txt"
    with open(buffer_file, "w", encoding='utf-8') as file:
        file.write(response_text)
    return buffer_file
async def read_from_buffer_file(buffer_file, chunk_size=1800):
    """Yield buffer_file contents in chunk_size-character pieces, then delete it.

    Async generator. Fix: read with an explicit UTF-8 encoding to mirror
    write_to_buffer_file (the second definition in this file already does);
    the file is removed once fully consumed.
    """
    with open(buffer_file, "r", encoding='utf-8') as file:
        while True:
            chunk = file.read(chunk_size)
            if not chunk:
                break
            yield chunk
    os.remove(buffer_file)
async def send_chunks(ctx, prompt_chunks):
    """Generate a reply for each prompt chunk and stream it to the channel.

    The first file-buffered piece becomes a new message; later pieces are
    appended by editing that message, then a timing summary is posted.
    """
    total_time = 0.0
    for chunk in prompt_chunks:
        generated_text, elapsed = await gpt3_generate(chunk)
        total_time += elapsed
        buffer_path = await write_to_buffer_file(generated_text)
        first_message = None
        async for piece in read_from_buffer_file(buffer_path):
            if first_message is None:
                first_message = await ctx.send(piece)
            else:
                # Brief pause between edits to stay under Discord rate limits.
                await asyncio.sleep(0.5)
                await first_message.edit(content=first_message.content + piece)
    await ctx.send(f"Total response time: {total_time:.2f} seconds.")
@bot.event
async def on_ready():
    # Startup confirmation once the gateway connection is established.
    print(f'Logged in as {bot.user.name} (ID: {bot.user.id})')
@bot.command()
async def trideque(ctx, *, user_input):
    # "!trideque <text>": acknowledge, split the input into word chunks,
    # then generate and stream the model's replies back to the channel.
    await ctx.send('The Matrix is loading, Robotic Reply Generating, please wait...')
    prompt_chunks = generate_chunks(user_input)
    await send_chunks(ctx, prompt_chunks) # Make sure to pass 'ctx' as an argument here
# Replace 'your_bot_token' with your actual bot token from the Discord Developer Portal
# nest_asyncio lets bot.run() start inside an already-running event loop (e.g. a notebook).
nest_asyncio.apply()
# NOTE(review): a placeholder token literal is hard-coded here instead of using
# the discord_token variable declared at the top of the file.
bot.run('bottoken')
# In[ ]:
# Second revision of the bot script follows. The two lines below were bare
# filename expressions (SyntaxErrors) left over from the notebook export;
# they are kept as comments for provenance:
# testdavev1.1.buffer.py
# testdavev1.2trideque.buffer.py
# In[ ]:
# Discord bot token (placeholder — substitute a real token before running).
discord_token = 'bottoken' #@param {type:"string"}
# ##Module installation
# this will install all the necessary modules
# In[ ]:
# ##Download and load GPT NEO model.
# It will take a little bit
# In[3]:
# Download (on first run) and instantiate GPT-Neo 1.3B plus its GPT-2 BPE tokenizer.
from transformers import GPTNeoForCausalLM, GPT2Tokenizer
model = GPTNeoForCausalLM.from_pretrained('EleutherAI/gpt-neo-1.3B')
tokenizer = GPT2Tokenizer.from_pretrained('EleutherAI/gpt-neo-1.3B')
# add padding token to tokenizer
tokenizer.add_special_tokens({'pad_token': '[PAD]'})
# set padding token id to the id of the padding token
model.config.pad_token_id = tokenizer.pad_token_id
# NOTE(review): the embedding matrix is not resized after adding [PAD]
# (model.resize_token_embeddings) — confirm the new id stays in range.
# Move all weights to the GPU; this raises if CUDA is unavailable.
model.cuda()
# In[4]:
# Conversation-mode presets keyed by mode name; 'chat' is the only entry.
_chat_preset = {
    'prompt': 'model\n\n',
    'partner': 'Partner: ',
    'ai': 'Humoid: ',
    'end': '\n',
}
modes = {'chat': _chat_preset}
# The preset in effect when building prompts.
mode = modes['chat']
# In[5]:
# ##Discord bot
# In[1]:
import discord
from discord.ext import commands
import nest_asyncio
import asyncio
import time
import os
# Trideque matrix
# Two rows of 50 AI/computing topic strings each.
# NOTE(review): this module-level `trideque` list is shadowed by the
# `trideque` bot command defined further down and is never read anywhere in
# this file — confirm whether it is dead data.
trideque = [
['Natural Language Processing', 'Speech Recognition', 'Text Generation', 'Sentiment Analysis', 'Entity Recognition', 'Language Translation', 'Question Answering', 'Information Extraction', 'Summarization', 'Topic Modeling', 'Language Modeling', 'Dialogue Generation', 'Language Inference', 'Commonsense Reasoning', 'Knowledge Graphs', 'Image Recognition', 'Object Detection', 'Image Segmentation', 'Visual Question Answering', 'Image Captioning', 'Generative Adversarial Networks', 'Style Transfer', 'Super Resolution', 'Generative Models', 'Reinforcement Learning', 'Deep Learning', 'Neural Networks', 'Convolutional Neural Networks', 'Recurrent Neural Networks', 'Transformer Networks', 'Self-Supervised Learning', 'Transfer Learning', 'Meta Learning', 'Few-Shot Learning', 'Explainable AI', 'Interpretability', 'Fairness', 'Privacy', 'Security', 'Robustness', 'Generalization', 'Continual Learning', 'Multi-Task Learning', 'Domain Adaptation', 'Data Augmentation', 'Data Bias', 'Data Labeling', 'Data Cleaning', 'Model Compression', 'Model Optimization', 'Model Selection'],
['Cloud Computing', 'Distributed Systems', 'Parallel Computing', 'High Performance Computing', 'Edge Computing', 'Fog Computing', 'Mobile Computing', 'Internet of Things', 'Cybersecurity', 'Big Data Analytics', 'Data Warehousing', 'Data Mining', 'Data Visualization', 'Business Intelligence', 'Data Science', 'Machine Learning Engineering', 'DevOps', 'Continuous Integration', 'Continuous Deployment', 'Agile Software Development', 'Software Testing', 'Software Quality Assurance', 'Software Metrics', 'Software Architecture', 'Microservices', 'Service-Oriented Architecture', 'Blockchain Technology', 'Cryptocurrencies', 'Smart Contracts', 'Decentralized Applications', 'Distributed Ledgers', 'Edge AI', 'Federated Learning', 'Edge Analytics', 'Edge Intelligence', 'Serverless Computing', 'Quantum Computing', 'Quantum Machine Learning', 'Quantum Cryptography', 'Quantum Simulation', 'Quantum Algorithms', 'Quantum Error Correction', 'Quantum Annealing', 'Quantum Supremacy', 'Quantum Internet', 'Quantum Key Distribution', 'Quantum Sensing', 'Quantum Metrology', 'Quantum Communication', 'Quantum Cryptanalysis']
]
class TridequeEntry:
    """A 2x50x1 grid of string cells addressed by (x, y, z)."""

    def __init__(self, matrix=None):
        if matrix is not None:
            self.matrix = matrix
        else:
            # Dimensions mirror the trideque topic matrix above:
            # 2 rows x 50 topics x 1 slot each.
            self.matrix = [[[""] for _ in range(50)] for _ in range(2)]

    def update_entry(self, x, y, z, value):
        """Store value at cell (x, y, z)."""
        self.matrix[x][y][z] = value

    def get_value(self, x, y, z):
        """Return the value stored at cell (x, y, z)."""
        return self.matrix[x][y][z]
# Module-level grid instance (not referenced by the bot commands below).
trideque_entry = TridequeEntry()
# Functions and async routines
def generate_chunks(prompt, chunk_size=1500):
    """Break prompt into space-joined pieces of at most chunk_size words."""
    remaining = prompt.split()
    pieces = []
    while remaining:
        pieces.append(' '.join(remaining[:chunk_size]))
        remaining = remaining[chunk_size:]
    return pieces
async def gpt3_generate(chunk, max_length=2000, time_limit=50.0):
    """Run GPT-Neo on chunk and return (decoded_text, elapsed_seconds).

    The original wrapped generation in a nested coroutine that was awaited
    immediately; the steps are inlined here with identical effect.
    """
    started = time.time()
    # Tokenize with truncation so the prompt never exceeds 512 tokens.
    token_ids = tokenizer.encode(chunk, return_tensors='pt', truncation=True, max_length=512)
    token_ids = token_ids.cuda()
    mask = token_ids.ne(tokenizer.pad_token_id).float()
    generated = model.generate(token_ids, max_length=max_length, do_sample=True, max_time=time_limit, attention_mask=mask)
    text = tokenizer.decode(generated[0])
    return text, time.time() - started
async def write_to_buffer_file(response_text):
    """Persist response_text to a UTF-8 scratch file and return its name."""
    path = "buffer.txt"
    with open(path, "w", encoding='utf-8') as handle:
        handle.write(response_text)
    return path
async def read_from_buffer_file(buffer_file, chunk_size=1800):
    """Stream buffer_file in chunk_size-character pieces, deleting it afterwards."""
    with open(buffer_file, "r", encoding='utf-8') as handle:
        # Walrus form of the read-until-empty loop.
        while piece := handle.read(chunk_size):
            yield piece
    os.remove(buffer_file)
async def send_chunks(ctx, prompt_chunks, repeat_count=-1):
    """Generate and post replies for prompt_chunks, repeating repeat_count times.

    NOTE(review): the default repeat_count=-1 means "repeat forever", and the
    !trideque command below calls this with the default — so the while loop
    never exits and the total-time message after it is unreachable. Confirm
    this endless-reply behavior is intended.
    """
    total_time = 0.0
    repetition = 0
    while repeat_count == -1 or repetition < repeat_count:
        for chunk in prompt_chunks:
            gpt3_response, response_time = await gpt3_generate(chunk)
            total_time += response_time
            buffer_file = await write_to_buffer_file(gpt3_response)
            # Send each file-buffered piece as its own message, paced to
            # respect Discord rate limits.
            async for response_part in read_from_buffer_file(buffer_file):
                await asyncio.sleep(0.5)
                await ctx.send(response_part)
        repetition += 1
    await ctx.send(f"Total response time: {total_time:.2f} seconds.")
# Discord bot
# Gateway intents with typing/presence events disabled — the bot never reads them.
intents = discord.Intents.default()
intents.typing = False
intents.presences = False
# Prefix-command bot: commands are invoked as "!<command>".
bot = commands.Bot(command_prefix='!', intents=intents)
@bot.event
async def on_ready():
    # Startup confirmation once the gateway connection is established.
    print(f'Logged in as {bot.user.name} (ID: {bot.user.id})')
@bot.command()
async def trideque(ctx, *, user_input):
    # "!trideque <text>": acknowledge, split the input into word chunks, then
    # generate and stream replies. NOTE(review): this command function shadows
    # the module-level `trideque` topic matrix defined above.
    await ctx.send('The Matrix is loading, Robotic Reply Generating, please wait...')
    prompt_chunks = generate_chunks(user_input)
    await send_chunks(ctx, prompt_chunks)
# nest_asyncio lets bot.run() start inside an already-running event loop
# (e.g. a notebook), then start the bot.
nest_asyncio.apply()
# Fix: the original passed the literal 'botoken' — a typo of the 'bottoken'
# placeholder — instead of the discord_token variable declared above.
bot.run(discord_token)