From f108f94c4fc80d70953b2bf42ec81e18a3b80ef5 Mon Sep 17 00:00:00 2001 From: "King Mock (mockk)" Date: Tue, 27 Nov 2018 16:40:29 +0000 Subject: [PATCH] Add files via upload --- Checkings.py | 60 +++++++ MyDictionary.py | 42 +++++ README.md | 35 +--- chat2.py | 99 +++++++++++ exchange.py | 55 +++--- hangman.py | 399 +++++++++++++++++++++---------------------- main2.py | 36 ++++ myconnector.py | 117 +++++++++++++ names.json | 1 + namestore.py | 52 +++--- nametesting.py | 12 ++ parrot.py | 65 +++++++ quote.py | 58 +++---- stackoverflow.py | 401 ++++++++++++++++++++++---------------------- translate.py | 213 +++++++++++------------ weather_realtime.py | 54 ++++++ web.py | 22 +++ wikipedia.py | 326 ++++++++++++++++++----------------- 18 files changed, 1265 insertions(+), 782 deletions(-) create mode 100644 Checkings.py create mode 100644 MyDictionary.py create mode 100644 chat2.py create mode 100644 main2.py create mode 100644 myconnector.py create mode 100644 names.json create mode 100644 nametesting.py create mode 100644 parrot.py create mode 100644 weather_realtime.py create mode 100644 web.py diff --git a/Checkings.py b/Checkings.py new file mode 100644 index 0000000..b28502b --- /dev/null +++ b/Checkings.py @@ -0,0 +1,60 @@ +''' All codes in this file are extracted and editted from https://stackoverflow.com/questions/25798674/python-duplicate-words ''' +def isRepeat(sentence): + words = sentence.split(" ") + words.sort() # ascending sorting + + # keep user original input + words_copied = sentence.split(" ") + words_copied.sort() + + wordL=[] + '''Copied codes to loop through the sentence given.''' + for word in words[0:len(words)-1]: + count = 0 + while True: + try: + index = words_copied.index(word) + count += 1 + del words_copied[index] + except ValueError: + if count > 1: + wordL.append([word,count]) + break + '''Copied code ends''' + if wordL != []: + repeat=True + else: + repeat=False + return repeat + +def CountRepeat(sentence): + words = sentence.split(" ") 
+ words.sort() # ascending sorting + + # keep user original input + words_copied = sentence.split(" ") + words_copied.sort() + + wordL={} + + for word in words[0:len(words)-1]: + count = 0 + while True: + try: + index = words_copied.index(word) + count += 1 + del words_copied[index] + except ValueError: + if count > 1: + wordL.update({word:count}) + break + if wordL != []: + repeat=True + else: + repeat=False + return wordL + +#sentence=input('Enter your sentence : ') +#print(isRepeat(input())) +#print(CountRepeat(sentence)) + diff --git a/MyDictionary.py b/MyDictionary.py new file mode 100644 index 0000000..e07bf56 --- /dev/null +++ b/MyDictionary.py @@ -0,0 +1,42 @@ +# Sentences for responding the user +MyDictionary={ + "GREETING_KEYWORDS":["hello", "hi", "greetings", "sup", "what's up"], + "GREETING_RESPONSES":["'sup bro", "hey", "*nods*", "hey you get my snap?"], + "REFUSE":["no","nah","not"], + "AGREE":["yes","sure","agreed","why not"], + "EMOTION_POSITIVE":["happy","thrilled","enjoyed","yo","high spirits"], + "EMOTION_NEGATIVE":["sad","blue","dissatisfied","heck","hell no"] +} + +def ref(category): + catName=[] + for j,k in MyDictionary.items(): #listing a whole dictionary + catName.append(j) + for i in catName: + if (category.upper()).strip() == i: + return i + if (category.upper()).strip() not in catName: + return False + + + +'''Function checks if a value is in a specified dictionary''' +def isRef(word): + try: + for j,k in MyDictionary.items(): #listing a whole dictionary + for i in k: #looping through the dictionary + for p in word.split(): + if (p.lower()).strip()==i: + return True + else: + continue + continue + continue + except: + print("Invalid input!") + print("Type of i: ",type(i),";type of p: ",type(p)) + else: + return False + +#print(isRef(input("Test a word: "))) +#print(ref(input("Test a word: "))) diff --git a/README.md b/README.md index ed8d54f..1bd47fc 100644 --- a/README.md +++ b/README.md @@ -2,37 +2,8 @@ Multi-purpose discord chatbot 
for module 4006CEM - Oct/Nov 2018 -Usage: `python3 connector.py` +Usage: python3 connector.py -Libraries being used: - - requests (http://docs.python-requests.org/en/master/) by Kenneth Reitz for web requests - - bs4 (https://www.crummy.com/software/BeautifulSoup/) by Leonard Richardson for parsing HTML pages - - discord.py (https://github.com/Rapptz/discord.py/) by Rapptz used to connect to Discord. - - selenium (https://docs.seleniumhq.org/) by (originally) Jason Huggins used to scrap pages that use javascript calls for data (translate module) +# Python 3.5 or larger must be installed due to asynchronous functions -How to get god-forsaken Selenium to work: -(https://selenium-python.readthedocs.io/installation.html) - -Step1. sudo python3 -m pip install selenium - -Step2. download Driver for your browser (I chose Chrome, download links in the link above) - -Step3. Set this part in your code to match your locations of: 1. Driver you've just downloaded, 2. Browser's exe location - - from selenium import webdriver - from selenium.webdriver.chrome.options import Options - - chrome_options = Options() - chrome_options.binary_location = r"C:\\Program Files (x86)\\Google\\Chrome\\Application\\chrome.exe" - chrome_driver_binary = "/mnt/c/Users/your_username/chromedriver.exe" - -Step4. When creating a driver, do it like this: - - driver = webdriver.Chrome(chrome_driver_binary, chrome_options = chrome_options) - -Step5. Pray to lord that it is working now - - - - -# Python 3.5 or 3.6 must be installed due to asynchronous functions +# Please state which modules/components needed to be installed before executing your program, thanks! diff --git a/chat2.py b/chat2.py new file mode 100644 index 0000000..65a95d1 --- /dev/null +++ b/chat2.py @@ -0,0 +1,99 @@ +from MyDictionary import * +def social(chat): + import random + try: + user=input("Can you share your life with me? 
") + except: + print("Invalid input!") + else: + for i in user.split(): + if i.lower() in MyDictionary["REFUSE"] or i.lower() in MyDictionary["Emotion_negative"]: + try: + user=input("I'm so sorry affecting your bad mood. So do you want to leave, or stay with my other services?(leave/service) ") + except: + print("Invalid input!") + else: + for j in user.split(): + if (str(j.lower())).strip() in ["leave"]: + exit() + elif (str(j.lower())).strip() in ["service"]: + from main2 import main2 + main2() + else: + print("I don't know what the hell are yo saying dude!!!") + try: + chat=input("Say again please: ") + except: + print("Invalid input!") + else: + social(chat) + else: + for j in user.split(): + if str(j.lower()) in Emotion_positive: + em=True + break + else: + em=False + continue + print(em) + res="I'm quite " + from parrot import find_longest_word + if em==True: + print(res,"glad to hear that you are",find_longest_word(user.split()),".") + else: + try: + user=input("Sorry what do you mean by ",find_longest_word(user.split()),"?") + except: + print("Invalid input!") + finally: + print(res,"disappointed to hear that you said",find_longest_word(user.split()),".") + + +'''Greet each other''' +def greet(): + import random + while True: + res="" + found=False + try: + user=input("Hello, what's up bro? 
") + for i in user.split(): + if i.lower() in GREETING_KEYWORDS: + print(GREETING_RESPONSES[random.randint(1,len(GREETING_RESPONSES))-1] ) # random generate a response based on user input + found=True + else: + if i.index(user.split())`") + else: + await client.send_message(message.channel, 'Your name is ' + name.strip() + ".") + + + elif message.content.lower().startswith('!quote') or message.content.lower().startswith('!q'): + editable = await client.send_message(message.channel, 'Searching for zesty quotes...') + genQ = await quote.generateQuote() + await client.edit_message(editable, genQ) + + + ################################################## + # Konrad Czarniecki + + # command is !wiki search_word + elif message.content.lower().startswith('!wiki'): + await wikipedia.wikipedia(client, message) + + # command is !flow search_phrase_here. Then '!f na' for next answer, '!f nq' for next question + elif message.content.lower().startswith('!flow'): + await stackoverflow.stackoverflow(client, message) + + # command is !translate from_language to target_language your_query e.g !translate english to french i hate selenium + elif message.content.lower().startswith('!translate'): + await translatesimple.translate(client, message) + + elif message.content.lower().startswith('!hangman'): + await hangman.hangman(client, message) + + ################################################## + + elif message.content.lower().startswith('!help'): + await client.send_message(message.channel, commands()) + + elif message.content.lower().startswith('weather'): + await weather_realtime.weather(client,message) + + # sorry bout that, this part is causing some errors with stackoverflow module + # elif message.content.lower().startswith('!'): + # await client.send_message(message.channel, "Unknown command! 
Use `!help` for a command/usage list.") + + +#This token is linked to mockk's discord account/applications - sets bot account +client.run('NTA3MTUwNDA3NjM0NTE4MDE3.Dtx6SA.VCa8D43Kdlbq0pETGjopiZgULiA') diff --git a/names.json b/names.json new file mode 100644 index 0000000..d27c7e1 --- /dev/null +++ b/names.json @@ -0,0 +1 @@ +{"123":"test"} \ No newline at end of file diff --git a/namestore.py b/namestore.py index 297f4a1..41c2f55 100644 --- a/namestore.py +++ b/namestore.py @@ -1,26 +1,26 @@ -import json - -""" -#This function is unused and deprecated because the async ruins the long term storage in memory. It remains to show work done -async def load_names(): - with open("names.json") as f: - names = json.load(f) -""" - -#Opens names.json, updates it with the new name -async def set_name(id, name): - with open("names.json") as f: - names = json.load(f) - - names[str(id)] = name - json.dump(names, open("names.json","w+")) - -#Opens names.json, checks if the user has a name and returns None if not -async def get_name(id): - with open("names.json") as f: - names = json.load(f) - - if str(id) in names: - return names[str(id)].capitalize() - else: - return None \ No newline at end of file +import json + +""" +#This function is unused and deprecated because the async ruins the long term storage in memory. 
It remains to show work done +async def load_names(): + with open("names.json") as f: + names = json.load(f) +""" + +#Opens names.json, updates it with the new name +async def set_name(id, name): + with open("names.json") as f: + names = json.load(f) + + names[str(id)] = name + json.dump(names, open("names.json","w+")) + +#Opens names.json, checks if the user has a name and returns None if not +async def get_name(id): + with open("names.json") as f: + names = json.load(f) + + if str(id) in names: + return names[str(id)] + else: + return None diff --git a/nametesting.py b/nametesting.py new file mode 100644 index 0000000..e600d0d --- /dev/null +++ b/nametesting.py @@ -0,0 +1,12 @@ +def nametest(name): + temp="" + for i in name.split(): + if i in range(ord("A"),ord("Z")+1): + #temp=name + name=name[name.find(i):name.find(" ",name.find(i))] + print("Entered loop") + break + #name=temp + return name.strip() + +print("Hi, ",nametest(input("Hi! What's your name? ")),", nice to meet you.") diff --git a/parrot.py b/parrot.py new file mode 100644 index 0000000..3ed9d0e --- /dev/null +++ b/parrot.py @@ -0,0 +1,65 @@ +'''this is a function to find the longest word in a sentence, usually the main point''' +def find_longest_word(word_list): + suffix=["ly","ed","ing","ful"] + longest_word="" + for word in word_list: + if len(word)>len(longest_word): + longest_word=word + else: + continue + aci=[ord(i) for i in longest_word.strip()] + for k in aci: + if (int(k)>=65 and int(k)<=90) or (int(k)>=97 and int(k)<=122): + continue + else: + aci.remove(k) + longest_word="".join(chr(i) for i in aci) + +#smart chatting feature + for j in suffix: + if longest_word.endswith(j)==True: + s2=" ".join(word_list).split(longest_word,1)[1] + if s2.isalpha()==False: + s2=str((s2.split())[0]) + longest_word=longest_word+" "+s2 +#smart part ends + + return longest_word +'''function completed''' + + +def MainParrot(): + while True: + print("Hello! 
Seems like you're a dumb, don't you?.") + user=input().split() + prompt=find_longest_word(user) + while True: + print("You've just said ",prompt,",isn't it?") + chat=input() + if chat in "no": + print("What do you mean by ",prompt," then? ",end="\t") + chat=input() + else: + break + prompt=find_longest_word(chat) + chat=input("Want to chat again? (yes) ") + for j in (chat.lower()).strip(): + if j in ["yes"]: + status=True + else: + status=False + if status==False: + user=input("Want my kindest service again?(yes) ") + for j in user.lower(): + if j in ["yes"]: + status=True + else: + status=False + if status==True: + from main2 import main2 + main2() + else: + break + if status==False: + break + return diff --git a/quote.py b/quote.py index a519d4f..83ccca5 100644 --- a/quote.py +++ b/quote.py @@ -1,28 +1,30 @@ -""" -Libraries: - - requests (http://docs.python-requests.org/en/master/) by Kenneth Reitz for web requests - - bs4 (https://www.crummy.com/software/BeautifulSoup/) by Leonard Richardson for parsing HTML pages -""" -import json -import requests -from bs4 import BeautifulSoup - -#Edits 'message' with a random quote, courtesy of quotesondesign.com -async def generateQuote(): - - url = 'http://quotesondesign.com/wp-json/posts?filter[orderby]=rand&filter[posts_per_page]=1' - response = requests.get(url).text #This returns the literal plaintext returned from quotesondesign.com - - try: - - data = json.loads(response) #We use 'loads' because we are loading a string instead of a file - - quote = BeautifulSoup(data[0]["content"], features="html5lib").find("p").string.strip() # Returns the quote with HTML entities decoded - - author = BeautifulSoup("

"+data[0]["title"]+ "

", features="html5lib").find("p").string.strip() # Returns the author with HTML entities decoded - - return "As " + author + " once said, '" + quote + "'" - - except ValueError as e: #Some quotes returned break the python json parser. This will use recursion to fetch a new one if this happens! - - return generateQuote() +'''Code is written by Ben''' + +""" +Libraries: + - requests (http://docs.python-requests.org/en/master/) by Kenneth Reitz for web requests + - bs4 (https://www.crummy.com/software/BeautifulSoup/) by Leonard Richardson for parsing HTML pages +""" +import json +import requests +from bs4 import BeautifulSoup + +#Edits 'message' with a random quote, courtesy of quotesondesign.com +async def generateQuote(): + + url = 'http://quotesondesign.com/wp-json/posts?filter[orderby]=rand&filter[posts_per_page]=1' + response = requests.get(url).text #This returns the literal plaintext returned from quotesondesign.com + + try: + + data = json.loads(response) #We use 'loads' because we are loading a string instead of a file + + quote = BeautifulSoup(data[0]["content"], features="html5lib").find("p").string.strip() # Returns the quote with HTML entities decoded + + author = BeautifulSoup("

"+data[0]["title"]+ "

", features="html5lib").find("p").string.strip() # Returns the author with HTML entities decoded + + return "As " + author + " once said, '" + quote + "'" + + except ValueError as e: #Some quotes returned break the python json parser. This will use recursion to fetch a new one if this happens! + + return generateQuote() diff --git a/stackoverflow.py b/stackoverflow.py index 480ed97..ba0b668 100644 --- a/stackoverflow.py +++ b/stackoverflow.py @@ -1,199 +1,202 @@ -import requests -from bs4 import BeautifulSoup - - -async def stackoverflow(client, message): - - url = message.content[6:] - url = url.lower().split(' ') - url = "+".join(url) - url = 'https://www.google.com/search?q=' + url + '+site%3Astackoverflow.com' - - print(url) - - #preventing google from blocking bots - headers = { - 'User-Agent': 'Mozilla/5.0' - #'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:64.0) Gecko/20100101 Firefox/64.0' - #'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:63.0) Gecko/20100101 Firefox/63.0' - #'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36' - } - - def parsePage(url): - """take url as a string and parse it""" - page = requests.get(url, headers=headers) - soup = BeautifulSoup(page.text, 'html.parser') - return soup - - soup = parsePage(url) - - - #delete image links - imgLinks = soup.find_all('img') - delete = [] - for link in imgLinks: - badLink = link['title'] - if 'meta' in badLink: - continue - else: - delete.append(badLink) - - #search for google search results and put them in 'good' list - searches = soup.find_all('a') - - x=0 - good = [] - for link in searches: - Flink = link['href'] - Flink = Flink[7:] - if 'questions' in Flink: - if 'webcache' in Flink: - continue - elif 'meta' in Flink: - continue - else: - good.append(Flink) - for match in delete: - if match in Flink: - good.remove(Flink) - else: - continue - - else: - continue - - def postPage(result): - """take 
in url as string and find a post on stackoverflow""" - soup1 = parsePage(good[result]) - post = soup1.find_all(class_='post-text') - return post - - def chunks(s, n): - """output string s in n chunks""" - chunkList = [] - length = len(s) - count = int(length / n) - num = 0 - k = n - for i in range(count): - chunk = s[num:k] - num = k - k = k + n - chunkList.append(chunk) - - rest = length % n - end = s[-rest:] - - chunkList.append(end) - - return chunkList - - - - - - - #print first question - page = 0 - answer = 1 - try: - await client.send_message(message.channel, 'URL ' + '<' + good[page] + '>') - await client.send_message(message.channel, '.\n\n__**Question**__ ' + '**' + str(1) + '**' + postPage(page)[0].text) - except: - #in case question is longer that discord's message limit which is 2000 characters - try: - long = postPage(page)[0].text - partedMessage = chunks(long, 1000) - - await client.send_message(message.channel, '.\n\n__**Question**__ ' + '**' + str(1) + '**') - - for i in partedMessage: - await client.send_message(message.channel, i) - except: - await client.send_message(message.channel, 'Sorry, no questions found :( Try again.') - - #print first answer - try: - await client.send_message(message.channel, '.\n\n\n\n__**Answer**__ ' + '**' + str(1) + '**' + postPage(page)[answer].text) - await client.send_message(message.channel, "Type '!f na' for next answer or '!f nq' for next relevant question") - except: - # in case answer is longer that discord's message limit which is 2000 characters - long = postPage(page)[answer].text - partedMessage = chunks(long, 1000) - - await client.send_message(message.channel, '__**Answer**__ ' + '**' + str(1) + '**') - - for i in partedMessage: - await client.send_message(message.channel, i) - - await client.send_message(message.channel, "Type '!f na' for next answer or '!f nq' for next relevant question") - - - #this loop allows user to scroll through all answers on stackoverflow page, and gives and option to move to 
next page - while page < (len(good)-1): - # waits for user input - message = await client.wait_for_message(author=message.author) - choice = message.content - - if '!f' not in choice: - await client.send_message(message.channel, 'Wrong command.') - #if users want to see next answer - elif 'stop' in choice: - break - elif 'na' in choice: - answer = answer + 1 - try: - await client.send_message(message.channel, '__**Answer**__ ' + '**' + str(answer) + '**' + postPage(page)[answer].text) - await client.send_message(message.channel, "Type '!f na' for next answer or '!f nq' for next relevant question") - except: - try: - # in case answer is longer that discord's message limit which is 2000 characters - long = postPage(page)[answer].text - partedMessage = chunks(long, 1000) - - await client.send_message(message.channel, '__**Answer**__ ' + '**' + str(1) + '**') - - for i in partedMessage: - await client.send_message(message.channel, i) - - await client.send_message(message.channel, "Type '!f na' for next answer or '!f nq' for next relevant question") - except: - await client.send_message(message.channel, "No more answers. 
Type '!f nq' for next question.") - - # if users want to see next question (google result) - elif 'nq' in choice: - answer = 1 - page = page + 1 - print(page) - try: - await client.send_message(message.channel, 'URL ' + '<' + good[page] + '>') - await client.send_message(message.channel, '.\n\n__**Question**__ ' + '**' + str(page + 1) + '**' + postPage(page)[0].text) - except: - # in case question is longer that discord's message limit which is 2000 characters - long = postPage(page)[0].text - partedMessage = chunks(long, 1000) - - await client.send_message(message.channel, '.\n\n__**Question**__ ' + '**' + str(page + 1) + '**') - - for i in partedMessage: - await client.send_message(message.channel, i) - - try: - await client.send_message(message.channel, '__**Answer**__ ' + '**' + str(1) + '**' + postPage(page)[answer].text) - await client.send_message(message.channel, "Type '!f na' for next answer or '!f nq' for next relevant question") - except: - try: - # in case answer is longer that discord's message limit which is 2000 characters - long = postPage(page)[answer].text - partedMessage = chunks(long, 1000) - - await client.send_message(message.channel, '__**Answer**__ ' + '**' + str(1) + '**') - - for i in partedMessage: - await client.send_message(message.channel, i) - await client.send_message(message.channel, "Type '!f na' for next answer or '!f nq' for next relevant question") - except: - await client.send_message(message.channel, "No more answers. 
Type '!f nq' for next question.") - - - await client.send_message(message.channel, 'No more questions :( ') \ No newline at end of file +'''Code is written by Ben.''' + + +import requests +from bs4 import BeautifulSoup + + +async def stackoverflow(client, message): + + url = message.content[6:] + url = url.lower().split(' ') + url = "+".join(url) + url = 'https://www.google.com/search?q=' + url + '+site%3Astackoverflow.com' + + print(url) + + #preventing google from blocking bots + headers = { + 'User-Agent': 'Mozilla/5.0' + #'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:64.0) Gecko/20100101 Firefox/64.0' + #'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:63.0) Gecko/20100101 Firefox/63.0' + #'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36' + } + + def parsePage(url): + """take url as a string and parse it""" + page = requests.get(url, headers=headers) + soup = BeautifulSoup(page.text, 'html.parser') + return soup + + soup = parsePage(url) + + + #delete image links + imgLinks = soup.find_all('img') + delete = [] + for link in imgLinks: + badLink = link['title'] + if 'meta' in badLink: + continue + else: + delete.append(badLink) + + #search for google search results and put them in 'good' list + searches = soup.find_all('a') + + x=0 + good = [] + for link in searches: + Flink = link['href'] + Flink = Flink[7:] + if 'questions' in Flink: + if 'webcache' in Flink: + continue + elif 'meta' in Flink: + continue + else: + good.append(Flink) + for match in delete: + if match in Flink: + good.remove(Flink) + else: + continue + + else: + continue + + def postPage(result): + """take in url as string and find a post on stackoverflow""" + soup1 = parsePage(good[result]) + post = soup1.find_all(class_='post-text') + return post + + def chunks(s, n): + """output string s in n chunks""" + chunkList = [] + length = len(s) + count = int(length / n) + num = 0 + k = n + for i in 
range(count): + chunk = s[num:k] + num = k + k = k + n + chunkList.append(chunk) + + rest = length % n + end = s[-rest:] + + chunkList.append(end) + + return chunkList + + + + + + + #print first question + page = 0 + answer = 1 + try: + await client.send_message(message.channel, 'URL ' + '<' + good[page] + '>') + await client.send_message(message.channel, '__**Question**__ ' + '**' + str(1) + '**' + postPage(page)[0].text) + except: + #in case question is longer that discord's message limit which is 2000 characters + try: + long = postPage(page)[0].text + partedMessage = chunks(long, 1000) + + await client.send_message(message.channel, '__**Question**__ ' + '**' + str(1) + '**') + + for i in partedMessage: + await client.send_message(message.channel, i) + except: + await client.send_message(message.channel, 'Sorry, no questions found :( Try again.') + + #print first answer + try: + await client.send_message(message.channel, '.\n\n\n\n__**Answer**__ ' + '**' + str(1) + '**' + postPage(page)[answer].text) + await client.send_message(message.channel, "Type '!f na' for next answer or '!f nq' for next relevant question") + except: + # in case answer is longer that discord's message limit which is 2000 characters + long = postPage(page)[answer].text + partedMessage = chunks(long, 1000) + + await client.send_message(message.channel, '__**Answer**__ ' + '**' + str(1) + '**') + + for i in partedMessage: + await client.send_message(message.channel, i) + + await client.send_message(message.channel, "Type '!f na' for next answer or '!f nq' for next relevant question") + + + #this loop allows user to scroll through all answers on stackoverflow page, and gives and option to move to next page + while page < (len(good)-1): + # waits for user input + message = await client.wait_for_message(author=message.author) + choice = message.content + + if '!f' not in choice: + await client.send_message(message.channel, 'Wrong command.') + #if users want to see next answer + elif 'stop' in 
choice: + break + elif 'na' in choice: + answer = answer + 1 + try: + await client.send_message(message.channel, '__**Answer**__ ' + '**' + str(answer) + '**' + postPage(page)[answer].text) + await client.send_message(message.channel, "Type '!f na' for next answer or '!f nq' for next relevant question") + except: + try: + # in case answer is longer that discord's message limit which is 2000 characters + long = postPage(page)[answer].text + partedMessage = chunks(long, 1000) + + await client.send_message(message.channel, '__**Answer**__ ' + '**' + str(1) + '**') + + for i in partedMessage: + await client.send_message(message.channel, i) + + await client.send_message(message.channel, "Type '!f na' for next answer or '!f nq' for next relevant question") + except: + await client.send_message(message.channel, "No more answers. Type '!f nq' for next question.") + + # if users want to see next question (google result) + elif 'nq' in choice: + answer = 1 + page = page + 1 + print(page) + try: + await client.send_message(message.channel, 'URL ' + '<' + good[page] + '>') + await client.send_message(message.channel, '__**Question**__ ' + '**' + str(page + 1) + '**' + postPage(page)[0].text) + except: + # in case question is longer that discord's message limit which is 2000 characters + long = postPage(page)[0].text + partedMessage = chunks(long, 1000) + + await client.send_message(message.channel, '__**Question**__ ' + '**' + str(page + 1) + '**') + + for i in partedMessage: + await client.send_message(message.channel, i) + + try: + await client.send_message(message.channel, '__**Answer**__ ' + '**' + str(1) + '**' + postPage(page)[answer].text) + await client.send_message(message.channel, "Type '!f na' for next answer or '!f nq' for next relevant question") + except: + try: + # in case answer is longer that discord's message limit which is 2000 characters + long = postPage(page)[answer].text + partedMessage = chunks(long, 1000) + + await client.send_message(message.channel, 
'__**Answer**__ ' + '**' + str(1) + '**') + + for i in partedMessage: + await client.send_message(message.channel, i) + await client.send_message(message.channel, "Type '!f na' for next answer or '!f nq' for next relevant question") + except: + await client.send_message(message.channel, "No more answers. Type '!f nq' for next question.") + + + await client.send_message(message.channel, 'No more questions :( ') diff --git a/translate.py b/translate.py index 54f32d9..f53885c 100644 --- a/translate.py +++ b/translate.py @@ -1,106 +1,107 @@ -import requests -from selenium import webdriver -from bs4 import BeautifulSoup -from selenium.webdriver.chrome.options import Options -from selenium.webdriver.common.by import By - - -#library used: -# - Selenium (https://docs.seleniumhq.org/) - - - -async def translate(client, message): - - def parsePage(url): - """take url as a string and parse it""" - page = requests.get(url) - soup = BeautifulSoup(page.content, 'html.parser') - return soup - - - def getLanguage(lang): - """take language name e.g. 
"""Reconstruction of a mangled git patch.

The original file was a unified diff whose newlines were lost, fusing many
patch lines into a few huge physical lines.  This body carries the Python
content of the four files the patch touched (translate.py,
weather_realtime.py, web.py, wikipedia.py).  The patch's "-" (removed)
copies of translate.py and wikipedia.py were byte-duplicates of the "+"
copies and are represented once.
"""

# ---------------------------------------------------------------------------
# translate.py
# ---------------------------------------------------------------------------
'''Code is written by Ben.'''

import requests
from selenium import webdriver
from bs4 import BeautifulSoup
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By

# Library used:
#  - Selenium (https://docs.seleniumhq.org/)


async def translate(client, message):
    """Translate a Discord command of the form "<from> to <to> <text...>"
    by driving Google Translate in a headless Chrome instance and posting
    the result back to the channel."""

    def parsePage(url):
        """Fetch *url* and return it parsed as a BeautifulSoup tree."""
        page = requests.get(url)
        return BeautifulSoup(page.content, 'html.parser')

    def getLanguage(lang):
        """Map a language name (e.g. 'english') to its code (e.g. 'en')
        by scraping the lingoes.net language-code table, where each code
        cell immediately precedes its language-name cell."""
        soup = parsePage('http://www.lingoes.net/en/translator/langcode.htm')
        languages = [cell.text.lower() for cell in soup.find_all('td')]

        # BUG FIX: str.lower() returns a new string; the original called
        # lang.lower() and discarded the result, so mixed-case input failed.
        lang = lang.lower()

        num = languages.index(lang)
        goal = languages[num - 1]          # the code cell precedes the name
        if goal == 'zh':
            goal = goal + '-CN'            # Google expects zh-CN, not zh
        return goal

    def tearDown(driver):
        """Quit the WebDriver.  (The original named this parameter 'self'
        although it is a plain function called as tearDown(driver).)"""
        driver.quit()

    fullQuery = message.content[11:]           # strip the command prefix
    queryList = fullQuery.lower().split(' ')
    fromLan = getLanguage(queryList[0])        # source language name
    toLan = getLanguage(queryList[2])          # target, after the word "to"

    # drop "<from> to <to>", leaving only the text to translate
    del queryList[:3]

    query = "%20".join(queryList)
    url = 'https://translate.google.com/?hl=en#%s/%s/' % (fromLan, toLan) + query
    print(url)

    def parsePageSelenium(url):
        """Open *url* in headless Chrome and return the live driver."""
        chrome_options = Options()
        chrome_options.binary_location = r"C:\\Program Files (x86)\\Google\\Chrome\\Application\\chrome.exe"
        chrome_driver_binary = "/mnt/c/Users/School/Documents/chromedriver.exe"
        chrome_options.add_argument("--headless")
        driver = webdriver.Chrome(chrome_driver_binary, chrome_options=chrome_options)
        driver.get(url)
        return driver

    driver = parsePageSelenium(url)
    # NOTE(review): assumes Google still renders the translation inside
    # span#result_box — verify, this markup changes frequently.
    result = driver.find_element(By.XPATH, ".//span[@id='result_box']/span").text
    await client.send_message(message.channel, 'Translation: ' + result)
    tearDown(driver)


# ---------------------------------------------------------------------------
# weather_realtime.py
# ---------------------------------------------------------------------------
import discord
import time


async def weather(client, message):
    '''Code extracted and edited from: https://pypi.org/project/weather-api/
    (weather-api 1.0.6).  Returns real-time weather to users.
    Needs Internet access!!!'''
    from weather import Weather, Unit

    weather = Weather(unit=Unit.CELSIUS)
    lookup = weather.lookup(560743)        # WOEID of the default location
    condition = lookup.condition

    msg = "It is " + condition.text.lower() + " today."
    await client.send_message(message.channel, msg)
    await client.send_message(message.channel, "Specific weather details: ")

    while True:
        try:
            weather = Weather(unit=Unit.CELSIUS)
            # BUG FIX: the original read "async client.send_message(...)",
            # which is a SyntaxError, and a stray "@client.event" decorator
            # that never actually waited for the user's reply.
            await client.send_message(
                message.channel,
                "Which place's weather do you want me to tell you? ")
            reply = await client.wait_for_message(author=message.author)
            place = reply.content
            location = weather.lookup_by_location(place.strip())
            forecasts = location.forecast
        except Exception:                  # narrowed from a bare except:
            await client.send_message(message.channel, "Invalid location!")
        else:
            for forecast in forecasts:
                # BUG FIX: the original line ended with ',"."' which built a
                # (str, str) tuple instead of appending the period.
                msg = forecast.date + " will be " + forecast.text.lower().strip() + "."
                await client.send_message(message.channel, msg)
                msg = forecast.date + ":" + forecast.low + "℃ to " + forecast.high + "℃."
                await client.send_message(message.channel, msg)
                msg = "On this day in " + place.strip() + ", you should \t"
                if float(forecast.low) > 25 or float(forecast.high) > 30:
                    msg = msg + "drink more water and beware of heat stroke."
                elif float(forecast.low) < 0 or float(forecast.high) < 13:
                    msg = msg + "wear more clothes before outing. Beware of cold temperature otherwise you'll get a cold."
                else:
                    msg = msg + "do nothing but enjoy your day, because this is a normal day."
                # BUG FIX: edit_message() takes a Message object, but the
                # original passed a channel; send the fully-built advice once.
                await client.send_message(message.channel, msg)
            break
    '''Extracted code ends.'''


# ---------------------------------------------------------------------------
# web.py
# ---------------------------------------------------------------------------
import webbrowser


def _web_demo():
    """Open YouTube in a browser tab and report whether it worked."""
    page = requests.get("https://www.youtube.com/")
    soup = BeautifulSoup(page.text, "html.parser")

    # BUG FIX: the original called webbrowser.open() twice (once bare, once
    # inside the if-test), opening two tabs.  Open once and test the result.
    opened = webbrowser.open("https://www.youtube.com/", new=2)
    if opened:  # status printing
        print("Successful browser, see in a few seconds.")
        print("Status: ", page)
    else:
        print("I'm sorry. The browser isn't working though.")
        print("Status: ", page)


if __name__ == "__main__":
    # Guarded entry point: the original ran at import time, opening a
    # browser as a side effect of merely importing the module.
    _web_demo()


# ---------------------------------------------------------------------------
# wikipedia.py
# ---------------------------------------------------------------------------
'''Code is written by Ben.'''

# Libraries:
#  - requests (http://docs.python-requests.org/en/master/) by Kenneth Reitz
#    for web requests
#  - bs4 (https://www.crummy.com/software/BeautifulSoup/) by Leonard
#    Richardson for parsing HTML pages
#  - discord.py (https://github.com/Rapptz/discord.py/) by Rapptz used to
#    connect to Discord.


async def wikipedia(client, message):
    """Look the command's argument up on Wikipedia and post the article's
    first paragraph, its first image, and a link back to the article.
    Disambiguation pages prompt the user to pick a numbered option."""

    title = "_".join(message.content[6:].lower().split(' '))
    URL = "https://en.wikipedia.org/wiki/" + title

    def pageContentFun(url):
        """Take URL as string and parse its body."""
        page = requests.get(url)
        soup = BeautifulSoup(page.text, 'html.parser')
        x = soup.find(class_='mw-parser-output')

        # delete parts of the html that sometimes make the function find the
        # wrong body
        for empty in x.find_all(class_='mw-empty-elt'):
            empty.decompose()

        # drop the first two tables (infobox/metadata) ahead of the prose
        for _ in range(2):
            x.find('table').decompose()
        return x

    def findImage(url):
        """Take url as string, find the first img in the body, return its url."""
        page = requests.get(url)
        soup = BeautifulSoup(page.text, 'html.parser')

        image = soup.find(class_='image')
        imgURL = image.find('img')['src']

        # skip the "citation needed" question-book placeholder image
        if 'Question_book' in imgURL:
            image.decompose()
            image = soup.find(class_='image')
            imgURL = image.find('img')['src']
        return imgURL

    def findP(x):
        """Take the site's body found earlier and return the first real
        paragraph (skipping a leading coordinates blurb)."""
        paragraphs = x.find_all('p')
        if 'Coordinates' in paragraphs[0].text:
            return paragraphs[1].text
        return paragraphs[0].text

    # find the page's first paragraph and first image
    pageContent = pageContentFun(URL)
    cleanText = findP(pageContent)
    imgURL = findImage(URL)

    # Some wiki sites give you a few options to choose from; filter those.
    if "most commonly refers to:" in cleanText:
        links = pageContent.find('ul').find_all('a')

        list1 = []   # display names of the options
        list2 = []   # full links of the options
        for x, link in enumerate(links, start=1):
            linkUrl = link['href']
            list2.append('https://en.wikipedia.org' + linkUrl)
            # "/wiki/Some_Page" -> "Some Page", numbered for display
            name = " ".join(linkUrl[6:].split('_'))
            list1.append(str(x) + '. ' + name)

        await client.send_message(message.channel, 'Which one exactly?')
        for item in list1:
            await client.send_message(message.channel, item)

        # wait for the user's numeric choice
        message = await client.wait_for_message(author=message.author)
        numberChosen = int(message.content)
        URL = list2[numberChosen - 1]

        pageContent = pageContentFun(URL)
        cleanText = findP(pageContent)
        imgURL = findImage(URL)

    # clean footnote markers such as [1] .. [39] and note markers
    for x in range(1, 40):
        cleanText = cleanText.replace('[' + str(x) + ']', '')
    cleanText = cleanText.replace('( listen)', '')
    cleanText = cleanText.replace('[note 1]', '')
    cleanText = cleanText.replace('[note 2]', '')

    em = discord.Embed()
    em.set_image(url='https:' + imgURL)
    await client.send_message(message.channel, embed=em)
    await client.send_message(message.channel, cleanText)
    # wrap the URL in <> so Discord doesn't render a second preview
    await client.send_message(message.channel, 'Wikipedia: ' + '<' + URL + '>')