# HG changeset patch
# User Richard
# Date 1512164216 0
# Node ID ec0994c0a2a0aa9c7b98a176e8effa230455e0e3
# Parent 369f758afd9b53ff47fe01afbebe0524d0c53862
# Parent 196df98347d897c40e9e9333921d5b2b12147ad9
Merged in aseques/pywhois (pull request #26)

All the failing tests fixed

diff -r 369f758afd9b -r ec0994c0a2a0 test/test_main.py
--- a/test/test_main.py	Fri Dec 01 21:21:05 2017 +0000
+++ b/test/test_main.py	Fri Dec 01 21:36:56 2017 +0000
@@ -19,7 +19,7 @@
 
     def test_ascii_with_schema_path_and_query(self):
         url = 'https://www.google.com/search?q=why+is+domain+whois+such+a+mess'
-        domain = 'www.google.com'
+        domain = 'google.com'
         self.assertEqual(domain, extract_domain(url))
 
     def test_simple_unicode_domain(self):
diff -r 369f758afd9b -r ec0994c0a2a0 whois/__init__.py
--- a/whois/__init__.py	Fri Dec 01 21:21:05 2017 +0000
+++ b/whois/__init__.py	Fri Dec 01 21:36:56 2017 +0000
@@ -79,17 +79,17 @@
     if not isinstance(url, str):
         url = url.decode('utf-8')
     url = re.sub('^.*://', '', url)
-    url = url.split('/')[0].lower().encode('idna')
+    url = url.split('/')[0].lower()
 
     # find the longest suffix match
     domain = b''
-    for section in reversed(url.split(b'.')):
+    for section in reversed(url.split('.')):
         if domain:
             domain = b'.' + domain
-        domain = section + domain
+        domain = section.encode('utf-8') + domain
         if domain not in suffixes:
             break
-    return domain.decode('idna')
+    return domain.decode('utf-8')
 
 
 if __name__ == '__main__':
diff -r 369f758afd9b -r ec0994c0a2a0 whois/parser.py
--- a/whois/parser.py	Fri Dec 01 21:21:05 2017 +0000
+++ b/whois/parser.py	Fri Dec 01 21:36:56 2017 +0000
@@ -30,6 +30,7 @@
 
 KNOWN_FORMATS = [
     '%d-%b-%Y',                 # 02-jan-2000
+    '%d-%m-%Y',                 # 20-10-2000
     '%Y-%m-%d',                 # 2000-01-02
     '%d.%m.%Y',                 # 2.1.2000
     '%Y.%m.%d',                 # 2000.01.02
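
For reference, a minimal sketch (not part of the patch) of the behaviour these changes target, assuming the package is importable as whois and that extract_domain is exposed from whois/__init__.py as shown in the diff: the updated test expects extract_domain to drop the leading www. label, and the new '%d-%m-%Y' entry lets the date parser accept day-month-year values such as 20-10-2000.

# Illustration only, not part of the patch. Assumes the whois package is
# importable and extract_domain is exported from whois/__init__.py.
from datetime import datetime

from whois import extract_domain

# The suffix-matching loop now splits the text URL on '.' and encodes each
# section as UTF-8; it stops at the first accumulated name that is not a
# known public suffix, so the 'www.' label is dropped (matches the updated test).
print(extract_domain('https://www.google.com/search?q=why+is+domain+whois+such+a+mess'))
# -> 'google.com'

# The new '%d-%m-%Y' entry in KNOWN_FORMATS covers dates like 20-10-2000:
print(datetime.strptime('20-10-2000', '%d-%m-%Y'))
# -> datetime.datetime(2000, 10, 20, 0, 0)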