Different tokenizers will split these differently. Compare how WordPunctTokenizer and TreebankWordTokenizer handle the possessive in "Bob's sandwich":
$ python
>>> from nltk import WordPunctTokenizer
>>> from nltk import TreebankWordTokenizer
>>> from nltk.tag import pos_tag
>>> wpt = WordPunctTokenizer()
>>> tbt = TreebankWordTokenizer()
>>> wpt.tokenize("This is Bob's sandwich.")
['This', 'is', 'Bob', "'", 's', 'sandwich', '.']
>>> tbt.tokenize("This is Bob's sandwich.")
['This', 'is', 'Bob', "'s", 'sandwich', '.']
>>> pos_tag(wpt.tokenize("This is Bob's sandwich."))
[('This', 'DT'), ('is', 'VBZ'), ('Bob', 'NNP'), ("'", 'POS'), ('s', 'NNS'), ('sandwich', 'VBP'), ('.', '.')]
>>> pos_tag(tbt.tokenize("This is Bob's sandwich."))
[('This', 'DT'), ('is', 'VBZ'), ('Bob', 'NNP'), ("'s", 'POS'), ('sandwich', 'NN'), ('.', '.')]
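Notice the downstream effect on part-of-speech tagging: with the WordPunctTokenizer split, the tagger labels the stray s as a plural noun (NNS) and sandwich as a verb (VBP), whereas the Treebank split keeps 's together as a possessive marker (POS), letting sandwich be tagged correctly as a noun (NN).

For comparison, NLTK's default word_tokenize follows the Treebank conventions, so it should produce the same split as TreebankWordTokenizer here (a quick check; this assumes the punkt sentence-tokenizer models have been fetched with nltk.download('punkt')):

>>> from nltk import word_tokenize
>>> word_tokenize("This is Bob's sandwich.")
['This', 'is', 'Bob', "'s", 'sandwich', '.']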