aboutsummaryrefslogtreecommitdiff
path: root/challenge-014
diff options
context:
space:
mode:
authorYozen Hernandez <yzhernand@gmail.com>2019-06-30 13:57:37 -0400
committerYozen Hernandez <yzhernand@gmail.com>2019-06-30 13:57:37 -0400
commit340077561429f72df4ef99b7a00d63c8c267d0e1 (patch)
treef8d1a246955b467e2da510755359a8fce5a17cbe /challenge-014
parent6f77e37cd4c56a8be6a4e9cfa223039312b6ac5f (diff)
downloadperlweeklychallenge-club-340077561429f72df4ef99b7a00d63c8c267d0e1.tar.gz
perlweeklychallenge-club-340077561429f72df4ef99b7a00d63c8c267d0e1.tar.bz2
perlweeklychallenge-club-340077561429f72df4ef99b7a00d63c8c267d0e1.zip
Added solutions by Yozen Hernandez for challenges 1 and 2 for week 14
Diffstat (limited to 'challenge-014')
-rw-r--r--challenge-014/yozen-hernandez/blog.txt1
-rwxr-xr-xchallenge-014/yozen-hernandez/perl5/ch-1.pl43
-rwxr-xr-xchallenge-014/yozen-hernandez/perl5/ch-2.pl71
-rwxr-xr-xchallenge-014/yozen-hernandez/perl5/ch-2a.pl161
4 files changed, 276 insertions, 0 deletions
diff --git a/challenge-014/yozen-hernandez/blog.txt b/challenge-014/yozen-hernandez/blog.txt
new file mode 100644
index 0000000000..0a56e79acc
--- /dev/null
+++ b/challenge-014/yozen-hernandez/blog.txt
@@ -0,0 +1 @@
+https://yzhernand.github.io/posts/perl-weekly-challenge-14/
diff --git a/challenge-014/yozen-hernandez/perl5/ch-1.pl b/challenge-014/yozen-hernandez/perl5/ch-1.pl
new file mode 100755
index 0000000000..9eda40d5dd
--- /dev/null
+++ b/challenge-014/yozen-hernandez/perl5/ch-1.pl
@@ -0,0 +1,43 @@
+#!/usr/bin/env perl
+
+use v5.24;
+use strict;
+use warnings;
+use feature qw(say state signatures);
+no warnings "experimental::signatures";
+use Memoize;
+use List::Util qw(first);
+
+# Write a script to generate Van Eck’s sequence, which starts with 0. For more
+# information, please check out the wikipedia page. This challenge was proposed
+# by team member Andrezgz.
+
+# Use Memoize to cache intermediate values in this function
+memoize('van_eck_seq');    # wraps the sub below so each distinct ($n, $init) is computed only once
+
+=over
+
+=item van_eck_seq($n, $init = 0) : @sequence
+
+Return a list of the first $n+1 terms in van Eck's sequence.
+Supplying a value for $init changes the initial (0th) term.
+By default, $init = 0.
+
+=back
+
+=cut
+sub van_eck_seq ( $n, $init = 0 ) {
+
+    # Base case. The sequence always begins ($init, 0): the initial
+    # term has never been seen before, so the next term is 0.
+    return ( $init, 0 ) if ( $n == 0 );
+
+    my @seq = van_eck_seq( $n - 1, $init );    # terms 0 .. $n so far (memoized above)
+    my $m = first { $seq[$_] == $seq[$n] } reverse( 0 .. ( $n - 1 ) );    # most recent earlier index holding the last term
+    my $val = ( defined $m ) ? ( $n - $m ) : 0;    # gap back to that occurrence, or 0 if the term is new
+
+    return @seq, $val;
+}
+
+local $, = ", ";    # comma-separate the list handed to say
+say van_eck_seq(10);
diff --git a/challenge-014/yozen-hernandez/perl5/ch-2.pl b/challenge-014/yozen-hernandez/perl5/ch-2.pl
new file mode 100755
index 0000000000..cefb7951f8
--- /dev/null
+++ b/challenge-014/yozen-hernandez/perl5/ch-2.pl
@@ -0,0 +1,71 @@
+#!/usr/bin/env perl
+
+use v5.24;
+use strict;
+use warnings;
+use feature qw(say state);
+use List::Util qw(all);
+use JSON;    # NOTE(review): JSON appears unused in this script
+
+# Using only the official postal (2-letter) abbreviations for the 50 U.S.
+# states, write a script to find the longest English word you can spell? Here is
+# the list of U.S. states abbreviations as per wikipedia
+# [page](https://en.wikipedia.org/wiki/List_of_U.S._state_abbreviations). This
+# challenge was proposed by team member Neil Bowers.
+
+my @us_states = qw( AL AK AZ AR CA
+    CO CT DE FL GA
+    HI ID IL IN IA
+    KS KY LA ME MD
+    MA MI MN MS MO
+    MT NE NV NH NJ
+    NM NY NC ND OH
+    OK OR PA RI SC
+    SD TN TX UT VT
+    VA WA WV WI WY );
+
+my %us_states_to_val = map { $_ => 1 } @us_states;    # set of valid codes for O(1) membership tests
+
+my %longest_word = ( words => [], length => 0 );    # running best word(s) and their length
+
+# Scan a word list.
+# For each word, split into an array every two characters.
+# Ensure all elements are represented in the hash above.
+# For those passing that test, check if they are the longest word
+# possible to construct this way
+# From https://github.com/dwyl/english-words
+# open my $wl, "<", "$ENV{HOME}/Downloads/words_alpha.txt";
+open my $wl, "<", "/usr/share/dict/words";    # NOTE(review): unchecked open -- add `or die $!`
+while ( my $w = <$wl> ) {
+
+    # Chomp and drop apostrophes or any other
+    # non-alphabetical characters
+    chomp $w;
+    my $w_alpha = $w;
+    $w_alpha =~ s/[^[:alpha:]]//g;
+
+    # Skip unless length is even: all US state codes are bigrams
+    next unless ( length($w_alpha) % 2 == 0 );
+
+    # Use unpack to split word into an array of bigrams
+    # and use UC to make it all uppercase
+    my @w_split = unpack "(a2)*", uc($w_alpha);
+
+    # Skip if the set created from the word is not a
+    # subset of the US state codes list.
+    next unless all { exists $us_states_to_val{$_} } @w_split;
+
+    # By now, we can be sure that the word is only
+    # composed of elements in the us_states list.
+    if ( length($w_alpha) > $longest_word{length} ) {
+        @longest_word{qw(words length)} = ( [$w], length($w) );    # NOTE(review): length($w) still counts the stripped non-alpha chars; length($w_alpha) looks intended
+    }
+    elsif ( length($w_alpha) == $longest_word{length} ) {
+        push $longest_word{words}->@*, $w;
+    }
+}
+
+# Now just output what we found:
+say "Longest word(s) constructed using USPS state codes: "
+    . join( ", ", $longest_word{words}->@* )
+    . "\nwith a length of $longest_word{length} alphabetical characters"; \ No newline at end of file
diff --git a/challenge-014/yozen-hernandez/perl5/ch-2a.pl b/challenge-014/yozen-hernandez/perl5/ch-2a.pl
new file mode 100755
index 0000000000..c51e2e0853
--- /dev/null
+++ b/challenge-014/yozen-hernandez/perl5/ch-2a.pl
@@ -0,0 +1,161 @@
+#!/usr/bin/env perl
+
+use v5.24;
+use strict;
+use warnings;
+use feature qw(say state signatures);
+no warnings "experimental::signatures";
+use List::Util qw(all uniqstr first);
+use JSON;
+use Tree::Trie;
+use Graph;
+
+# Challenge 2a: what's the longest word you can spell by traversing US states,
+# taking the initial or initials of the states as you pass through them, without
+# revisiting any states?
+
+=over
+
+=item get_paths( $graph, $trie, $vertex, $data, %seen ) : @paths_strings
+
+Takes a Graph, Tree::Trie, vertex (string) in $graph, a reference to a hash of
+US state data which must include a key called "initials", and a hash of vertex
+=> index key-value pairs. The hash is not a reference, so modifications to the
+hash in nested calls to this function will not modify the data in the parent
+call.
+
+Returns a list of key-value pairs which can be coerced into a hash.
+The key is the path of states taken, and the value is the string produced.
+
+=back
+
+=cut
+sub get_paths ( $graph, $trie, $vertex, $data, %seen ) {
+    $seen{$vertex} = 1 + ( keys %seen );    # record 1-based visit order of $vertex on this path
+
+    my $string_so_far = fc(    # case-folded initials spelled out along the path, in visit order
+        join( "",
+            map { $data->{$_}->{initials} }
+            ( sort { $seen{$a} <=> $seen{$b} } keys %seen ) )
+    );
+
+    # Filter out successive vertexes which have already been visited
+    # and which would not produce a word in the trie
+    my @pot_successors = grep {
+        !( exists $seen{$_} )
+            && $trie->lookup(
+            $string_so_far . fc( $data->{$_}->{initials} ) );    # prefix test: caller keeps the trie in "boolean" deepsearch mode here
+    } sort { $a cmp $b } $graph->neighbors($vertex);
+
+    # Base case: no more successors, done with this path
+    if ( !@pot_successors ) {
+        return ( join( "->", sort { $seen{$a} <=> $seen{$b} } keys %seen ),
+            $string_so_far );    # key-value pair: "A->B->C" => spelled string
+    }
+    else {
+        # Process successors; %seen is passed by value, so each branch
+        return    # of the recursion gets its own independent copy
+            map { get_paths( $graph, $trie, $_, $data, %seen ) }
+            @pot_successors;
+    }
+
+}
+
+
+# Read in US state adjacency data from JSON file.
+my %us_adj_data;
+{
+    open my $adj_data, "<", "$ENV{HOME}/Downloads/usa-state-data.json";    # NOTE(review): unchecked open -- add `or die $!`
+
+    # Set $/ to undef so we can slurp the data in the file. Then
+    # use decode_json to save as a Perl hash. decode_json returns
+    # a hashref and I wanted a hash, so use postfix deref.
+    local $/;
+    %us_adj_data = decode_json(<$adj_data>)->%*;
+    close $adj_data;
+}
+
+# Build a graph we can traverse of the states, based on their adjacency.
+# Only save states which share borders with another state (the 48
+# continental states).
+my $us_graph = Graph->new(
+    vertices =>
+        [ grep { $us_adj_data{$_}->{adjacent}->@* > 0 } keys %us_adj_data ],
+    edges => [
+        map {
+            my $i = $_;    # capture the source state; the inner map rebinds $_
+            map { [ $i, $_ ] } $us_adj_data{$_}->{adjacent}->@*
+        } keys(%us_adj_data)
+    ]
+);
+
+# Build a trie (prefix tree) of our dictionary that we can query
+my $eng_words = Tree::Trie->new( deepsearch => "boolean" );    # boolean mode: lookup() answers "does any word start with this?"
+
+# From https://github.com/dwyl/english-words
+# open my $wl, "<", "$ENV{HOME}/Downloads/words_alpha.txt";
+open my $wl, "<", "/usr/share/dict/words";    # NOTE(review): unchecked open -- add `or die $!`
+while ( my $w = <$wl> ) {
+
+    # Chomp and fold case. This may add words
+    # with apostrophes and, in this case, we will
+    # never match with those
+    chomp $w;
+    $eng_words->add( fc($w) );
+}
+
+# Use a hash to save data on the longest word(s) found.
+my %longest_word = ( paths => [], words => [], length => 0 );
+
+# Iterate over all states in the graph.
+for my $state ( sort $us_graph->vertices ) {
+
+    # Finds all paths with strings that may be in the dictionary and
+    # save them into a hash.
+    my %paths = get_paths( $us_graph, $eng_words, $state, \%us_adj_data );
+
+    # Set the trie to do exact string search only
+    $eng_words->deepsearch("exact");
+
+    # Iterate over all the path->word elements in the %paths hash
+    while ( my ( $path, $search ) = each %paths ) {
+
+        # Drop current path; deleting the key just returned by each() is safe
+        delete $paths{$path};
+
+        # Do an exact string search for the word
+        my $match = $eng_words->lookup($search);
+        if ( $match && $match eq $search ) {
+
+            # A word was found in the dictionary. If it's longer
+            # than the longest seen so far, clear the longest_word
+            # hash and save the new data.
+            if ( length($search) > $longest_word{length} ) {
+                @longest_word{qw(paths words length)}
+                    = ( [$path], [$match], length($search) );
+            }
+            elsif ( length($search) == $longest_word{length} ) {
+
+                # A word of the same length as the longest seen
+                # was found. Push the data onto the list members
+                # of the longest_word hash.
+                push $longest_word{paths}->@*, $path;
+                push $longest_word{words}->@*, $match;
+            }
+
+            # last;
+        }
+    }
+
+    # Change search setting again so we can do prefix searches on the
+    # next iteration again.
+    $eng_words->deepsearch("boolean");
+}
+
+say "Longest word(s) constructed using initials of US states: ";
+for my $i ( 0 .. ( $longest_word{words}->@* - 1 ) ) {    # postfix deref in numeric context gives the element count
+    say join( ", ", $longest_word{paths}->[$i] ) . " = "
+        . join( ", ", $longest_word{words}->[$i] );    # NOTE(review): join over a single scalar is a no-op
+}
+say "with a length of $longest_word{length} alphabetical characters";