Skip to content

Commit

Permalink
Merge pull request #4 from aadi-1024/main
Browse files Browse the repository at this point in the history
WIP implementation
  • Loading branch information
aadi-1024 authored Feb 1, 2023
2 parents 1642820 + d9877a9 commit 9a8fe21
Show file tree
Hide file tree
Showing 3 changed files with 12 additions and 4 deletions.
3 changes: 2 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
.env
node_modules/
.vscode/
Aaditya/.env
Aaditya/.env
*/__pycache__
7 changes: 6 additions & 1 deletion Aaditya/main.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,11 @@
import discord
from scrapper import Scraper

class Client(discord.Client):

async def on_ready(self):
    """Gateway-ready event handler: announce startup and set up the scraper.

    NOTE(review): discord.py can fire ``on_ready`` more than once per
    process (e.g. after an automatic reconnect).  The original body
    unconditionally re-created the Scraper on every firing; guard so an
    existing instance is kept.
    """
    print("Bot is online!")
    # Create the scraper only on the first ready event; on_message relies
    # on self.scraper existing after this point.
    if not hasattr(self, "scraper"):
        self.scraper = Scraper()

async def on_message(self, message):
if message.author == self.user:
Expand All @@ -11,7 +14,9 @@ async def on_message(self, message):
await message.channel.send(f"Hi! {message.author.mention}")
elif message.content.startswith("!search"):
x = ' '.join(message.content.split()[1:])
print(x) #TODO
await message.channel.send(f"Looking up twitter for {x}")
file = self.scraper.scrape(x)
await message.channel.send(file=discord.File(fp=file, spoiler=False))

def main():
#only works if command is invoked from the working directory itself
Expand Down
6 changes: 4 additions & 2 deletions Aaditya/scrapper.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
class Scraper:
def __init__(self):
    # Scraper keeps no per-instance state: scrape() builds its query,
    # DataFrame, and output file from its arguments alone, so
    # construction is deliberately a no-op.
    pass
def scrape(text):
def scrape(self, text):
fQuery=text+' min_faves:4000'
data=sntwitter.TwitterSearchScraper(fQuery)
tweets=[]
Expand All @@ -18,4 +18,6 @@ def scrape(text):
break

fData=pd.DataFrame(tweets,columns='Link Username Content'.split())
#TODO
csvFile=text+'.csv'
fData.to_csv(csvFile,index=False)
return csvFile

0 comments on commit 9a8fe21

Please sign in to comment.