Remove duplicates from end of file

i/p
----
A
B
C
A
C
o/p
---
B
A
C
From the input file, duplicates should be removed from the end, keeping the last occurrence of each line, without changing the order of the remaining lines.

It should be handy to use tac in this case. awk '!a[$0]++' keeps only the first occurrence of each line, so reversing the file before and after the filter keeps the last occurrence instead:

tac file | awk '!a[$0]++' | tac
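
For example, assuming the sample lines from the question are saved in a file named file (the name already used in the command above), the pipeline produces the requested output:

$ printf 'A\nB\nC\nA\nC\n' > file
$ tac file | awk '!a[$0]++' | tac
B
A
C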

This Perl script should do what you want:

#!/usr/bin/perl

use strict;

my %seen;     # hash used to determine if a line has been seen.
my @a_line;   # array to store unique lines.
my $line;     # variable to store each line.

#
# Loop through all lines in the file
#
while (<>)
{
   #
   # Ignore the line if it has been seen before.
   #
   unless ($seen{$_})
   {

      #
      # Increment the hash for the line.
      #
      $seen{$_}++;

      #
      # Store the line in the array of lines.
      #
      push( @a_line, $_);
   }
}

# 
# Print out each line in the array of lines.
#
foreach (@a_line)
{
   print "$_";
}

exit;
#
# end of dup.pl
#

Sample Input Data

B
A
C
A
C

Output

./dup.pl < dup.dat
B
A
C

Hope this helps.

On systems where tail supports the -r (reverse) option, such as BSD, the same idea works without tac:

tail -r file | awk '!x[$0]++' | tail -r

Another option is a gawk script that reads the whole file as a single record and walks the lines from last to first:

#!/bin/bash
gawk 'BEGIN{
    RS="\n$"            # treat the whole file as one record
    FS="\n"             # one line per field
}
{
 # scan the lines from last to first, keeping each line only the
 # first time it is seen here, i.e. its last occurrence in the file
 for(o=NF;o>0;o--){
    if (!($o in a)){
        s[++d]=$o
    }
    a[$o]               # mark the line as seen
 }
}END{
 # print the saved lines back in their original relative order
 for(i=d;i>0;i--){
    print s[i]
 }
}' file

Output

# ./shell.sh
B
A
C

A short Perl alternative remembers the last line number at which each line occurs and then prints the lines sorted by that position:

while(<DATA>){
	chomp;
	$hash{$_} = $.;    # keep the most recent line number for each line
}
print join "\n", sort { $hash{$a} <=> $hash{$b} } keys %hash;
__DATA__
A
B
C
A
C
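
Assuming the snippet is saved as a standalone script (the name dedup_last.pl below is just a placeholder), running it prints the same result as the other approaches:

$ perl dedup_last.pl
B
A
C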