diff --git a/Aaditya/main.py b/Aaditya/main.py
index 8a55d48..14f99da 100644
--- a/Aaditya/main.py
+++ b/Aaditya/main.py
@@ -10,13 +10,22 @@ async def on_ready(self):
     async def on_message(self, message):
         if message.author == self.user:
             return
+
         if message.content.startswith("!hi"):
            await message.channel.send(f"Hi! {message.author.mention}")
+
         elif message.content.startswith("!search"):
             x = ' '.join(message.content.split()[1:])
             await message.channel.send(f"Looking up twitter for {x}")
-            file = self.scraper.scrape(x)
-            await message.channel.send(file=discord.File(fp=file, spoiler=False))
+
+            data = self.scraper.scrape(x, 5)
+            desc = ""
+
+            for content, link in data.items():
+                content = content[:30] + '...' if len(content) > 30 else content
+                desc += f"- [{content}]({link})\n\n"
+
+            await message.channel.send(embed=discord.Embed(color=discord.Colour.blurple(), title=x, description=desc))
 
 def main():
     #only works if command is invoked from the working directory itself
diff --git a/Aaditya/scrapper.py b/Aaditya/scrapper.py
index 2a4267d..ff86401 100644
--- a/Aaditya/scrapper.py
+++ b/Aaditya/scrapper.py
@@ -6,7 +6,7 @@ class Scraper:
     def __init__(self):
         pass
 
-    def scrape(self, text):
+    def scrape(self, text, num):
         fQuery=text+' min_faves:4000'
         data=sntwitter.TwitterSearchScraper(fQuery)
         tweets=[]
@@ -14,10 +14,11 @@ def scrape(self, text):
         for i,tweet in enumerate(data.get_items()):
             data1=[tweet.url,tweet.user.username,tweet.rawContent]
             tweets.append(data1)
-            if i>50:
+            if i>num:
                 break
 
         fData=pd.DataFrame(tweets,columns='Link Username Content'.split())
-        csvFile=text+'.csv'
-        fData.to_csv(csvFile,index=False)
-        return csvFile
+        data = {}
+        for i in range(num):
+            data[fData.at[i, 'Content']] = fData.at[i, 'Link']
+        return data
\ No newline at end of file
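Taken together, the two hunks replace the CSV round-trip (write a file, send it as an attachment) with an in-memory handoff: `scrape(text, num)` now returns a `{tweet content: tweet URL}` dict, and the bot renders it as a markdown link list inside an embed. Below is a minimal standalone sketch of that flow, assuming `snscrape` and `pandas` are installed; it is not the committed code verbatim, and `build_description` is an illustrative helper, not a function in the diff:

```python
# Sketch of the post-diff flow: scrape() returns {content: link},
# which the bot formats into an embed description of markdown links.
import snscrape.modules.twitter as sntwitter
import pandas as pd

def scrape(text: str, num: int) -> dict:
    # Restrict the search to well-liked tweets, mirroring the diff's query.
    query = text + ' min_faves:4000'
    tweets = []
    for i, tweet in enumerate(sntwitter.TwitterSearchScraper(query).get_items()):
        tweets.append([tweet.url, tweet.user.username, tweet.rawContent])
        # Stop shortly after `num` items; only the first `num` are used below.
        if i > num:
            break
    frame = pd.DataFrame(tweets, columns=['Link', 'Username', 'Content'])
    # Map each of the first `num` tweets' text to its URL, as in the diff.
    return {frame.at[i, 'Content']: frame.at[i, 'Link'] for i in range(num)}

def build_description(data: dict) -> str:
    # Mirrors the embed body built in on_message: one bullet per tweet,
    # with content truncated to 30 characters.
    desc = ""
    for content, link in data.items():
        content = content[:30] + '...' if len(content) > 30 else content
        desc += f"- [{content}]({link})\n\n"
    return desc
```

Two edge cases worth flagging in review: if the search yields fewer than `num` tweets, `fData.at[i, ...]` raises a `KeyError`, and because tweet text is used as the dict key, tweets with identical content collapse into a single entry. Returning a list of `(content, link)` pairs built directly from the DataFrame rows would avoid both.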