HashMap is a hash-table-based implementation of the Map interface. It extends AbstractMap and implements the Map, Cloneable, and java.io.Serializable interfaces. Data is stored as key-value pairs. Inside a HashMap each key-value pair is treated as a single entry: the storage location of an entry is computed from the key's hash, so a value can always be stored and retrieved quickly through its key.
1) Using HashMap
The most commonly used HashMap methods are:
1. put(Object key, Object value) adds an entry
2. get(Object key) returns the value mapped to the given key
3. remove(Object key) removes the entry with the given key
4. values() returns the collection of values
5. isEmpty() returns true if the map contains no key-value mappings
6. entrySet() returns a Set view of the mappings contained in the map
7. keySet() returns a Set view of the keys contained in the map
The following example exercises these methods:
package 集合;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
public class HashMapTest {
    public static void main(String[] args) {
        HashMap<Integer, String> map = new HashMap<>();
        map.put(7, "c罗");
        map.put(12, "马塞洛");
        map.put(9, "本泽马");
        map.put(4, "拉莫斯");
        map.put(14, "卡塞米罗");
        map.put(8, "克罗斯");
        // look up values by key
        System.out.println("Value for key 7: " + map.get(7));
        System.out.println("Value for key 4: " + map.get(4));
        // remove the entry with the given key
        map.remove(12);
        // the collection of values
        System.out.println("Values: " + map.values());
        // empty check
        System.out.println(map.isEmpty());
        // Set view of the keys
        System.out.println(map.keySet());
        System.out.println("EntrySet iteration:");
        // use a typed iterator so no cast is needed
        Iterator<Map.Entry<Integer, String>> itr = map.entrySet().iterator();
        while (itr.hasNext()) {
            Map.Entry<Integer, String> entry = itr.next();
            System.out.println(entry.getKey() + " = " + entry.getValue());
        }
    }
}
Running this prints the two lookups ("c罗" and "拉莫斯"), the value collection and key set after key 12 has been removed, false for isEmpty(), and finally every remaining entry from the entrySet iteration.
2) HashMap's underlying structure
The bucket index is computed by a hash function; when different keys map to the same index, a hash collision occurs, and HashMap resolves it with separate chaining (linked lists).
Simply put, a HashMap consists of an array plus linked lists: the array is the main body, and the linked lists exist mainly to resolve hash collisions. If the array slot a key maps to holds no chain (the entry's next is null), lookups and insertions are fast, requiring only a single index computation. If the slot does hold a chain, insertion costs O(n) in the chain length: the chain is traversed, the value is overwritten if the key already exists, and a new node is added otherwise. A lookup likewise has to walk the chain, comparing keys one by one with equals.
The array index is computed from the key's hash value. If the slot at that index is null, the new entry is inserted directly; otherwise the chain at that index is traversed to check whether the key already exists. If it does, the old value is overwritten; if the whole chain is traversed without finding the key, the new node is appended to the tail of the chain (tail insertion).
Since JDK 1.8, when a bucket's chain length exceeds the threshold of 8 (and the table already has at least 64 buckets; otherwise the table is resized instead), the chain is converted into a red-black tree, reducing the lookup cost in that bucket from O(n) to O(log n).
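A quick sketch of the indexing described above (the BucketIndexDemo class below is only for illustration and is not part of the JDK; the real HashMap additionally mixes the high bits of hashCode() before indexing, as shown later in the put section):
public class BucketIndexDemo {
    // For a power-of-two capacity n, (n - 1) & hash keeps the low bits of the hash,
    // which is the same as taking the hash modulo n, only cheaper.
    static int bucketIndex(int hash, int capacity) {
        return (capacity - 1) & hash;
    }

    public static void main(String[] args) {
        int capacity = 16; // the default HashMap capacity
        for (String key : new String[]{"c罗", "本泽马", "克罗斯"}) {
            System.out.println(key + " -> bucket " + bucketIndex(key.hashCode(), capacity));
        }
        // Keys whose hashes share the same low 4 bits land in the same bucket
        // and are chained there (or treeified once the chain grows past 8).
    }
}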
3) HashMap source code analysis
The default capacity, DEFAULT_INITIAL_CAPACITY, is 16.
The default load factor, DEFAULT_LOAD_FACTOR, is 0.75.
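For reference, these constants (together with the maximum capacity and the treeify threshold used later in this article) are declared in the JDK 8 source roughly as follows:
// The default initial capacity: must be a power of two.
static final int DEFAULT_INITIAL_CAPACITY = 1 << 4; // aka 16

// The maximum capacity, 2^30.
static final int MAXIMUM_CAPACITY = 1 << 30;

// The load factor used when none is specified in the constructor.
static final float DEFAULT_LOAD_FACTOR = 0.75f;

// The bin count threshold at which a bucket's chain is converted to a tree.
static final int TREEIFY_THRESHOLD = 8;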
Constructors
/**
* Constructs an empty <tt>HashMap</tt> with the specified initial
* capacity and load factor.
*
* @param initialCapacity the initial capacity
* @param loadFactor the load factor
* @throws IllegalArgumentException if the initial capacity is negative
* or the load factor is nonpositive
*/
public HashMap(int initialCapacity, float loadFactor) {
    // the initial capacity must not be negative
    if (initialCapacity < 0)
        throw new IllegalArgumentException("Illegal initial capacity: " +
                                           initialCapacity);
    // the initial capacity must not exceed the maximum capacity, which is 2^30
    if (initialCapacity > MAXIMUM_CAPACITY)
        initialCapacity = MAXIMUM_CAPACITY;
    // the load factor must be positive and not NaN
    if (loadFactor <= 0 || Float.isNaN(loadFactor))
        throw new IllegalArgumentException("Illegal load factor: " +
                                           loadFactor);
    this.loadFactor = loadFactor;
    // set the threshold to the smallest power of two >= initialCapacity
    this.threshold = tableSizeFor(initialCapacity);
}
/**
* Constructs an empty <tt>HashMap</tt> with the specified initial
* capacity and the default load factor (0.75).
*
* @param initialCapacity the initial capacity.
* @throws IllegalArgumentException if the initial capacity is negative.
*/
public HashMap(int initialCapacity) {
    // constructor that only specifies the initial capacity; uses the default load factor
    this(initialCapacity, DEFAULT_LOAD_FACTOR);
}
/**
* Constructs an empty <tt>HashMap</tt> with the default initial capacity
* (16) and the default load factor (0.75).
*/
public HashMap() {
    // no-arg constructor: only the load factor is set, to the default 0.75f
    this.loadFactor = DEFAULT_LOAD_FACTOR; // all other fields defaulted
}
/**
* Constructs a new <tt>HashMap</tt> with the same mappings as the
* specified <tt>Map</tt>. The <tt>HashMap</tt> is created with
* default load factor (0.75) and an initial capacity sufficient to
* hold the mappings in the specified <tt>Map</tt>.
*
* @param m the map whose mappings are to be placed in this map
* @throws NullPointerException if the specified map is null
*/
// builds a new hash map and copies all mappings of the given map m into it
public HashMap(Map<? extends K, ? extends V> m) {
    this.loadFactor = DEFAULT_LOAD_FACTOR;
    putMapEntries(m, false);
}
The resize function
/**
* Initializes or doubles table size. If null, allocates in
* accord with initial capacity target held in field threshold.
* Otherwise, because we are using power-of-two expansion, the
* elements from each bin must either stay at same index, or move
* with a power of two offset in the new table.
*
* @return the table
*/
final Node<K,V>[] resize() {
    // oldTab is the current bucket array
    Node<K,V>[] oldTab = table;
    // current capacity (length of the bucket array)
    int oldCap = (oldTab == null) ? 0 : oldTab.length;
    // current threshold
    int oldThr = threshold;
    // new capacity and new threshold, initialized to 0
    int newCap, newThr = 0;
    // if the current capacity is greater than 0
    if (oldCap > 0) {
        // if the capacity has already reached the upper limit
        if (oldCap >= MAXIMUM_CAPACITY) {
            // set the threshold to 2^31 - 1
            threshold = Integer.MAX_VALUE;
            // and return the current table without resizing
            return oldTab;
        }
        // otherwise the new capacity is twice the old capacity
        else if ((newCap = oldCap << 1) < MAXIMUM_CAPACITY &&
                 oldCap >= DEFAULT_INITIAL_CAPACITY) // if the old capacity is at least the default 16
            // the new threshold is also twice the old threshold
            newThr = oldThr << 1; // double threshold
    }
    // the table is empty but a threshold exists: a capacity was specified at
    // construction time and stored in threshold
    else if (oldThr > 0) // initial capacity was placed in threshold
        newCap = oldThr; // the new capacity is the old threshold
    else { // zero initial threshold signifies using defaults
        // the table is empty and no threshold was set: the no-arg constructor was used
        newCap = DEFAULT_INITIAL_CAPACITY; // the new capacity is the default 16
        newThr = (int)(DEFAULT_LOAD_FACTOR * DEFAULT_INITIAL_CAPACITY); // new threshold = 16 * 0.75f = 12
    }
    if (newThr == 0) { // newThr is still 0 in the "empty table but threshold set" case
        // compute the new threshold from the new capacity and the load factor
        float ft = (float)newCap * loadFactor;
        // clamp to Integer.MAX_VALUE on overflow
        newThr = (newCap < MAXIMUM_CAPACITY && ft < (float)MAXIMUM_CAPACITY ?
                  (int)ft : Integer.MAX_VALUE);
    }
    // update the threshold
    threshold = newThr;
    // allocate the new bucket array with the new capacity
    @SuppressWarnings({"rawtypes","unchecked"})
    Node<K,V>[] newTab = (Node<K,V>[])new Node[newCap];
    // point the table reference at the new array
    table = newTab;
    // if the old table contained elements, move every node into the new table
    if (oldTab != null) {
        // walk the old bucket array
        for (int j = 0; j < oldCap; ++j) {
            // e is the current node
            Node<K,V> e;
            // if this bucket is non-empty, take its head node
            if ((e = oldTab[j]) != null) {
                // clear the old bucket so it can be garbage-collected
                oldTab[j] = null;
                // if the bucket holds a single node (no hash collision)
                if (e.next == null)
                    // place it directly in the new table. The index is hash & (newCap - 1);
                    // because the capacity is a power of two this equals a modulo
                    // operation but is faster.
                    newTab[e.hash & (newCap - 1)] = e;
                // if the bucket was treeified after collisions (more than 8 nodes),
                // split the red-black tree (treated separately; not covered here)
                else if (e instanceof TreeNode)
                    ((TreeNode<K,V>)e).split(this, newTab, j, oldCap);
                // otherwise the bucket is a short chain: redistribute each node into
                // the new table according to its hash
                else { // preserve order
                    // Because the capacity doubles, each node either stays at its old
                    // index (the "low" list) or moves to old index + oldCap (the "high" list).
                    // head and tail of the low list
                    Node<K,V> loHead = null, loTail = null;
                    // head and tail of the high list
                    Node<K,V> hiHead = null, hiTail = null;
                    Node<K,V> next; // temporary: the node after e
                    do {
                        next = e.next;
                        // Another bit trick: hash & oldCap tells whether the hash, taken
                        // modulo the new capacity, is below oldCap (bit is 0, node stays
                        // in the low list) or not (bit is 1, node goes to the high list).
                        if ((e.hash & oldCap) == 0) {
                            // append to the low list
                            if (loTail == null)
                                loHead = e;
                            else
                                loTail.next = e;
                            loTail = e;
                        }
                        // same logic for the high list
                        else {
                            if (hiTail == null)
                                hiHead = e;
                            else
                                hiTail.next = e;
                            hiTail = e;
                        }
                        // loop until the end of the chain
                    } while ((e = next) != null);
                    // the low list stays at the original index
                    if (loTail != null) {
                        loTail.next = null;
                        newTab[j] = loHead;
                    }
                    // the high list goes to the new index j + oldCap
                    if (hiTail != null) {
                        hiTail.next = null;
                        newTab[j + oldCap] = hiHead;
                    }
                }
            }
        }
    }
    return newTab;
}
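The low/high split can be verified with a few concrete numbers; the standalone sketch below is not JDK code, just a check of the bit trick used above:
public class ResizeSplitDemo {
    public static void main(String[] args) {
        int oldCap = 16, newCap = 32;
        for (int hash : new int[]{5, 21, 37, 53}) { // all map to bucket 5 before the resize
            int oldIndex = hash & (oldCap - 1);      // index before the resize
            int newIndex = hash & (newCap - 1);      // index after the resize
            boolean staysLow = (hash & oldCap) == 0; // the single bit HashMap tests
            System.out.printf("hash=%d old=%d new=%d staysLow=%b%n",
                              hash, oldIndex, newIndex, staysLow);
            // newIndex is always either oldIndex (low list) or oldIndex + oldCap (high list)
        }
    }
}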
Insertion and removal
/**
* Associates the specified value with the specified key in this map.
* If the map previously contained a mapping for the key, the old
* value is replaced.
*
* @param key key with which the specified value is to be associated
* @param value value to be associated with the specified key
* @return the previous value associated with <tt>key</tt>, or
* <tt>null</tt> if there was no mapping for <tt>key</tt>.
* (A <tt>null</tt> return can also indicate that the map
* previously associated <tt>null</tt> with <tt>key</tt>.)
*/
public V put(K key, V value) {
    // compute the hash of the key, then delegate to putVal to do the actual insertion
    return putVal(hash(key), key, value, false, true);
}
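put() first runs the key through the static hash() helper, which in JDK 8 XORs the upper 16 bits of hashCode() into the lower 16 bits so that the high bits also influence the index (n - 1) & hash. Reproduced here for reference (treat the exact form as approximate):
static final int hash(Object key) {
    int h;
    // mix the top 16 bits into the bottom 16 bits; a null key always hashes to bucket 0
    return (key == null) ? 0 : (h = key.hashCode()) ^ (h >>> 16);
}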
/**
* Copies all of the mappings from the specified map to this map.
* These mappings will replace any mappings that this map had for
* any of the keys currently in the specified map.
*
* @param m mappings to be stored in this map
* @throws NullPointerException if the specified map is null
*/
public void putAll(Map<? extends K, ? extends V> m) {
    // copy every mapping of the other map into this one; the evict parameter is
    // false only during construction and true otherwise
    putMapEntries(m, true);
}
/**
* Removes the mapping for the specified key from this map if present.
*
* @param key key whose mapping is to be removed from the map
* @return the previous value associated with <tt>key</tt>, or
* <tt>null</tt> if there was no mapping for <tt>key</tt>.
* (A <tt>null</tt> return can also indicate that the map
* previously associated <tt>null</tt> with <tt>key</tt>.)
*/
// if a mapping for the key exists, remove it and return its value; otherwise return null
public V remove(Object key) {
    Node<K,V> e;
    return (e = removeNode(hash(key), key, null, false, true)) == null ?
        null : e.value;
}
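As the Javadoc above points out, a null return from remove() is ambiguous when the map stores null values. A small illustrative snippet (the contents are made up for the example):
import java.util.HashMap;

public class RemoveNullDemo {
    public static void main(String[] args) {
        HashMap<String, String> m = new HashMap<>();
        m.put("a", null);                  // HashMap allows null values (and one null key)
        System.out.println(m.remove("a")); // prints null although the key was present
        System.out.println(m.remove("b")); // also prints null: the key was never there
        // call containsKey(...) first if the distinction matters
    }
}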
/**
* Implements Map.remove and related methods.
*
* @param hash hash for key
* @param key the key
* @param value the value to match if matchValue, else ignored
* @param matchValue if true only remove if value is equal
* @param movable if false do not move other nodes while removing
* @return the node, or null if none
*/
final Node<K,V> removeNode(int hash, Object key, Object value,
                           boolean matchValue, boolean movable) {
    // p is the node preceding the node to be removed
    Node<K,V>[] tab; Node<K,V> p; int n, index;
    // proceed only if the table is non-empty and the bucket computed from the hash holds a node
    if ((tab = table) != null && (n = tab.length) > 0 &&
        (p = tab[index = (n - 1) & hash]) != null) {
        // node will hold the node to be removed
        Node<K,V> node = null, e; K k; V v;
        // if the head of the bucket is the node to remove
        if (p.hash == hash &&
            ((k = p.key) == key || (key != null && key.equals(k))))
            // remember it in node
            node = p;
        else if ((e = p.next) != null) {
            // otherwise search the rest of the bucket for the node to remove
            if (p instanceof TreeNode)
                node = ((TreeNode<K,V>)p).getTreeNode(hash, key);
            else {
                do {
                    if (e.hash == hash &&
                        ((k = e.key) == key ||
                         (key != null && key.equals(k)))) {
                        node = e;
                        break;
                    }
                    p = e;
                } while ((e = e.next) != null);
            }
        }
        // remove the node if it was found and either matchValue is false or the values are equal
        if (node != null && (!matchValue || (v = node.value) == value ||
                             (value != null && value.equals(v)))) {
            if (node instanceof TreeNode)
                ((TreeNode<K,V>)node).removeTreeNode(this, tab, movable);
            else if (node == p) // node == p means the head of the bucket is being removed
                tab[index] = node.next;
            else // otherwise unlink the node from the middle of the chain
                p.next = node.next;
            ++modCount; // record the structural modification
            --size;     // shrink the size
            afterNodeRemoval(node);
            return node;
        }
    }
    return null;
}
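The matchValue parameter is what backs the two-argument Map.remove(key, value): in JDK 8, HashMap overrides it as essentially the one-liner below (reproduced from memory, so treat the exact form as approximate):
@Override
public boolean remove(Object key, Object value) {
    // only removes the mapping if the current value is equal to the given value
    return removeNode(hash(key), key, value, true, true) != null;
}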
Iteration
/**
* Returns a {@link Set} view of the mappings contained in this map.
* The set is backed by the map, so changes to the map are
* reflected in the set, and vice-versa. If the map is modified
* while an iteration over the set is in progress (except through
* the iterator's own <tt>remove</tt> operation, or through the
* <tt>setValue</tt> operation on a map entry returned by the
* iterator) the results of the iteration are undefined. The set
* supports element removal, which removes the corresponding
* mapping from the map, via the <tt>Iterator.remove</tt>,
* <tt>Set.remove</tt>, <tt>removeAll</tt>, <tt>retainAll</tt> and
* <tt>clear</tt> operations. It does not support the
* <tt>add</tt> or <tt>addAll</tt> operations.
*
* @return a set view of the mappings contained in this map
*/
public Set<Map.Entry<K,V>> entrySet() {
    Set<Map.Entry<K,V>> es;
    return (es = entrySet) == null ? (entrySet = new EntrySet()) : es;
}
final class EntrySet extends AbstractSet<Map.Entry<K,V>> {
    public final int size()                 { return size; }
    public final void clear()               { HashMap.this.clear(); }
    public final Iterator<Map.Entry<K,V>> iterator() {
        return new EntryIterator();
    }
    public final boolean contains(Object o) {
        if (!(o instanceof Map.Entry))
            return false;
        Map.Entry<?,?> e = (Map.Entry<?,?>) o;
        Object key = e.getKey();
        Node<K,V> candidate = getNode(hash(key), key);
        return candidate != null && candidate.equals(e);
    }
    public final boolean remove(Object o) {
        if (o instanceof Map.Entry) {
            Map.Entry<?,?> e = (Map.Entry<?,?>) o;
            Object key = e.getKey();
            Object value = e.getValue();
            return removeNode(hash(key), key, value, true, true) != null;
        }
        return false;
    }
    public final Spliterator<Map.Entry<K,V>> spliterator() {
        return new EntrySpliterator<>(HashMap.this, 0, -1, 0, 0);
    }
    public final void forEach(Consumer<? super Map.Entry<K,V>> action) {
        Node<K,V>[] tab;
        if (action == null)
            throw new NullPointerException();
        if (size > 0 && (tab = table) != null) {
            int mc = modCount;
            for (int i = 0; i < tab.length; ++i) {
                for (Node<K,V> e = tab[i]; e != null; e = e.next)
                    action.accept(e);
            }
            if (modCount != mc)
                throw new ConcurrentModificationException();
        }
    }
}
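Tying this back to section 1: the entry set is a live, fail-fast view of the map, so structural removal during iteration has to go through the iterator. A small usage sketch (the contents are the same sample players as before):
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;

public class EntrySetIterationDemo {
    public static void main(String[] args) {
        HashMap<Integer, String> map = new HashMap<>();
        map.put(7, "c罗");
        map.put(9, "本泽马");
        map.put(4, "拉莫斯");

        // the enhanced for loop also iterates over entrySet() under the hood
        for (Map.Entry<Integer, String> entry : map.entrySet()) {
            System.out.println(entry.getKey() + " = " + entry.getValue());
        }

        // structural modification while iterating must use Iterator.remove();
        // calling map.remove(...) inside this loop would make the next iterator
        // operation throw ConcurrentModificationException
        Iterator<Map.Entry<Integer, String>> it = map.entrySet().iterator();
        while (it.hasNext()) {
            if (it.next().getKey() == 9)
                it.remove(); // safe: removes the mapping from the map as well
        }
        System.out.println(map); // prints {4=拉莫斯, 7=c罗} with the default capacity
    }
}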